/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	: 	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *              David Woodhouse :       Perform some basic ICMP handling.
 *                                      IPIP Routing without decapsulation.
 *              Carlos Picoto   :       GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name not tunnel: when error reporting.
		Added tx_dropped stat

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to destination gateway in addition to the
			tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/
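
/* For illustration only: a tunnel of this kind is normally configured from
 * user space with iproute2.  The device name and addresses below are just
 * examples, not anything this driver assumes:
 *
 *	ip tunnel add tunl1 mode ipip local 192.0.2.1 remote 198.51.100.2 ttl 64
 *	ip addr add 10.0.0.1/30 dev tunl1
 *	ip link set tunl1 up
 */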

/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
	*/
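
/* A minimal sketch of the allocation pattern described above, for
 * illustration only; skb, hdr_len, payload and payload_len are
 * hypothetical and nothing below is used by this driver:
 *
 *	skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);	(reserve headroom for later headers)
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	skb_push(skb, hdr_len);		(prepend space for a header)
 *	...
 *	kfree_skb(skb);			(release the buffer when done)
 */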

/*
   This version of net/ipv4/ipip.c is a clone of net/ipv4/ip_gre.c

   For comments, look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int ipip_net_id __read_mostly;

static int ipip_tunnel_init(struct net_device *dev);
static struct rtnl_link_ops ipip_link_ops __read_mostly;

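/* ICMP error handler for the outer IPv4 header of tunnelled packets:
 * look up the tunnel the errored packet belongs to, update the path MTU
 * on ICMP_FRAG_NEEDED, follow redirects, and otherwise record a
 * transient error (err_count/err_time) for the transmit path.
 */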
static int ipip_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload.  This means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err = 0;

	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			     iph->daddr, iph->saddr, 0);
	if (!t) {
		err = -ENOENT;
		goto out;
	}

	switch (type) {
	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
			/* Impossible event. */
			goto out;
		default:
			/* All others are translated to HOST_UNREACH.
			 * rfc2003 contains "deep thoughts" about NET_UNREACH,
			 * I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			goto out;
		break;

	case ICMP_REDIRECT:
		break;

	default:
		goto out;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, net, t->parms.link, iph->protocol);
		goto out;
	}

	if (t->parms.iph.daddr == 0) {
		err = -ENOENT;
		goto out;
	}

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

out:
	return err;
}

static const struct tnl_ptk_info ipip_tpi = {
	/* no tunnel info required for ipip. */
	.proto = htons(ETH_P_IP),
};

#if IS_ENABLED(CONFIG_MPLS)
static const struct tnl_ptk_info mplsip_tpi = {
	/* no tunnel info required for mplsip. */
	.proto = htons(ETH_P_MPLS_UC),
};
#endif

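/* Common receive path for IPv4-in-IPv4 and MPLS-in-IPv4: find the tunnel
 * matching the outer addresses, check the configured inner protocol and
 * the xfrm policy, strip the outer header and hand the packet (plus any
 * collected metadata) to ip_tunnel_rcv().  Returns -1 when no tunnel
 * matches, so the caller can fall back to other handlers.
 */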
static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			iph->saddr, iph->daddr, 0);
	if (tunnel) {
		const struct tnl_ptk_info *tpi;

		if (tunnel->parms.iph.protocol != ipproto &&
		    tunnel->parms.iph.protocol != 0)
			goto drop;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
#if IS_ENABLED(CONFIG_MPLS)
		if (ipproto == IPPROTO_MPLS)
			tpi = &mplsip_tpi;
		else
#endif
			tpi = &ipip_tpi;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (tunnel->collect_md) {
			tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				return 0;
		}
		return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	}

	return -1;

drop:
	kfree_skb(skb);
	return 0;
}

static int ipip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_IPIP);
}

#if IS_ENABLED(CONFIG_MPLS)
static int mplsip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_MPLS);
}
#endif

/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8 ipproto;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	if (tunnel->collect_md)
		ip_md_tunnel_xmit(skb, dev, ipproto, 0);
	else
		ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
{
	switch (ipproto) {
	case 0:
	case IPPROTO_IPIP:
#if IS_ENABLED(CONFIG_MPLS)
	case IPPROTO_MPLS:
#endif
		return true;
	}

	return false;
}

static int
ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 ||
		    !ipip_tunnel_ioctl_verify_protocol(p.iph.protocol) ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			return -EINVAL;
	}

	p.i_key = p.o_key = 0;
	p.i_flags = p.o_flags = 0;
	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static const struct net_device_ops ipip_netdev_ops = {
	.ndo_init       = ipip_tunnel_init,
	.ndo_uninit     = ip_tunnel_uninit,
	.ndo_start_xmit	= ipip_tunnel_xmit,
	.ndo_do_ioctl	= ipip_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
};

#define IPIP_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_GSO_SOFTWARE |	\
		       NETIF_F_HW_CSUM)

static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;

	dev->type		= ARPHRD_TUNNEL;
	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPIP_FEATURES;
	dev->hw_features	|= IPIP_FEATURES;
	ip_tunnel_setup(dev, ipip_net_id);
}

static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	tunnel->tun_hlen = 0;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	return ip_tunnel_init(dev);
}

static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPIP && proto != IPPROTO_MPLS && proto != 0)
		return -EINVAL;

	return 0;
}

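/* Fill an ip_tunnel_parm from IFLA_IPTUN_* netlink attributes, starting
 * from the defaults for this driver (IPv4, IPPROTO_IPIP, ihl of 5, and
 * path MTU discovery on unless explicitly disabled).
 */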
static void ipip_netlink_parms(struct nlattr *data[],
			       struct ip_tunnel_parm *parms, bool *collect_md,
			       __u32 *fwmark)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.version = 4;
	parms->iph.protocol = IPPROTO_IPIP;
	parms->iph.ihl = 5;
	*collect_md = false;

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL]) {
		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
		if (parms->iph.ttl)
			parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_IPTUN_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		*collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipip_netlink_encap_parms(struct nlattr *data[],
				     struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ipip_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &t->collect_md, &fwmark);
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	bool collect_md;
	__u32 fwmark = t->fwmark;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &collect_md, &fwmark);
	if (collect_md)
		return -EINVAL;

	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
		return -EINVAL;

	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t ipip_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(4) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(4) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_TOS */
		nla_total_size(1) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
		       !!(parm->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	if (tunnel->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ipip_link_ops __read_mostly = {
	.kind		= "ipip",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ipip_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipip_tunnel_setup,
	.validate	= ipip_tunnel_validate,
	.newlink	= ipip_newlink,
	.changelink	= ipip_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipip_get_size,
	.fill_info	= ipip_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

#if IS_ENABLED(CONFIG_MPLS)
static struct xfrm_tunnel mplsip_handler __read_mostly = {
	.handler	=	mplsip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};
#endif

static int __net_init ipip_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}

static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
}

static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit_batch = ipip_exit_batch_net,
	.id   = &ipip_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipip_init(void)
{
	int err;

	pr_info("ipip: IPv4 and MPLS over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_ipip_failed;
	}
#if IS_ENABLED(CONFIG_MPLS)
	err = xfrm4_tunnel_register(&mplsip_handler, AF_MPLS);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_mplsip_failed;
	}
#endif
	err = rtnl_link_register(&ipip_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

out:
	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
	xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mplsip_failed:

#endif
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
	unregister_pernet_device(&ipip_net_ops);
	goto out;
}

static void __exit ipip_fini(void)
{
	rtnl_link_unregister(&ipip_link_ops);
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);
#if IS_ENABLED(CONFIG_MPLS)
	if (xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS))
		pr_info("%s: can't deregister tunnel\n", __func__);
#endif
	unregister_pernet_device(&ipip_net_ops);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");