xref: /openbmc/linux/net/ipv4/ip_vti.c (revision 37be287c)
/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c.

   For comments look at net/ipv4/ip_gre.c --ANK
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);

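/* Receive side, hooked into the xfrm4 tunnel-mode input path through
 * vti_handler below: find the ip_tunnel matching the outer addresses,
 * enforce inbound policy using the tunnel o_key as the skb mark,
 * update rx statistics and hand the skb over to the vti device.
 */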
/* We don't digest the packet, therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->saddr, iph->daddr, 0);
	if (tunnel != NULL) {
		struct pcpu_sw_netstats *tstats;
		u32 oldmark = skb->mark;
		int ret;

		/* Temporarily mark the skb with the tunnel o_key, to
		 * only match policies with this mark.
		 */
		skb->mark = be32_to_cpu(tunnel->parms.o_key);
		ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
		skb->mark = oldmark;
		if (!ret)
			return -1;

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		secpath_reset(skb);
		skb->dev = tunnel->dev;
		return 1;
	}

	return -1;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */

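/* Transmit side: route the packet towards the tunnel destination with the
 * o_key as flow mark, insist that the resulting route carries a tunnel-mode
 * xfrm transform (otherwise the tunnel is considered non-functional) and
 * push the skb through dst_output(); the actual IPsec encapsulation is left
 * to the xfrm stack.
 */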
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr  *old_iph = ip_hdr(skb);
	__be32 dst = tiph->daddr;
	struct flowi4 fl4;
	int err;

	if (skb->protocol != htons(ETH_P_IP))
		goto tx_error;

	tos = old_iph->tos;

	memset(&fl4, 0, sizeof(fl4));
	flowi4_init_output(&fl4, tunnel->parms.link,
			   be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
			   RT_SCOPE_UNIVERSE,
			   IPPROTO_IPIP, 0,
			   dst, tiph->saddr, 0, 0);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	/* If there is no transform, or the xfrm is not in tunnel mode,
	 * then this tunnel is not functional.
	 */
	if (!rt->dst.xfrm ||
	    rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
		dev->stats.tx_carrier_errors++;
		ip_rt_put(rt);
		goto tx_error_icmp;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else {
			tunnel->err_count = 0;
		}
	}

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	nf_reset(skb);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(skb);
	if (net_xmit_eval(err) == 0)
		err = skb->len;
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

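/* Legacy ioctl-based configuration (SIOCADDTUNNEL and friends), e.g. from
 * the "ip tunnel" command: sanity-check the parameters, let the generic
 * ip_tunnel code do the work and report the VTI specific flags back to
 * user space.
 */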
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5)
			return -EINVAL;
	}

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd != SIOCDELTUNNEL) {
		p.i_flags |= GRE_KEY | VTI_ISVTI;
		p.o_flags |= GRE_KEY;
	}

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}

static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= ip_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_do_ioctl	= vti_tunnel_ioctl,
	.ndo_change_mtu	= ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
};

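/* Called by the rtnl core when a vti device is allocated: install the
 * netdev ops and let the generic ip_tunnel code finish the setup.
 */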
static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	ip_tunnel_setup(dev, vti_net_id);
}

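/* ndo_init: derive the device hardware/broadcast addresses from the tunnel
 * endpoints and set the vti-specific link parameters before handing over
 * to ip_tunnel_init().
 */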
static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->type		= ARPHRD_TUNNEL;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->features		|= NETIF_F_LLTX;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	return ip_tunnel_init(dev);
}

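/* Minimal setup for the per-namespace fallback device (ip_vti0); only the
 * header fields checked by vti_tunnel_ioctl() need to be initialised here.
 */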
static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;
}

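/* Receive hook registered with the xfrm4 tunnel-mode input path; every
 * decapsulated packet is offered to vti_rcv() so it can be diverted to the
 * matching vti device.
 */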
static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
	.handler	=	vti_rcv,
	.priority	=	1,
};

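/* Per-namespace setup: create the tunnel hash tables and the ip_vti0
 * fallback device; vti_exit_net() below tears them down again.
 */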
static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct ip_tunnel_net *itn;

	err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
	if (err)
		return err;
	itn = net_generic(net, vti_net_id);
	vti_fb_tunnel_init(itn->fb_tunnel_dev);
	return 0;
}

static void __net_exit vti_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	ip_tunnel_delete_net(itn, &vti_link_ops);
}

static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit = vti_exit_net,
	.id   = &vti_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

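/* Nothing to validate beyond what vti_policy already enforces. */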
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return 0;
}

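/* Translate the IFLA_VTI_* netlink attributes into the generic
 * ip_tunnel_parm structure used by the ip_tunnel core.
 */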
static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
}

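/* rtnl newlink/changelink handlers: parse the attributes and let the
 * generic ip_tunnel code create or update the device.
 */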
static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm parms;

	vti_netlink_parms(data, &parms);
	return ip_tunnel_newlink(dev, tb, &parms);
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[])
{
	struct ip_tunnel_parm p;

	vti_netlink_parms(data, &p);
	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t vti_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(4) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(4) +
		0;
}

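/* Dump the tunnel configuration back to user space; vti_get_size() above
 * reserves room for exactly these five attributes.
 */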
static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	nla_put_u32(skb, IFLA_VTI_LINK, p->link);
	nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
	nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
	nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
	nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);

	return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
};

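/* rtnl_link_ops backing "ip link ... type vti".  A typical invocation from
 * iproute2 (illustrative example only; addresses and key are made up) is:
 *
 *   ip link add vti1 type vti local 10.0.0.1 remote 10.0.0.2 okey 42
 */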
static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
};

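/* Module init: register the per-namespace operations, the xfrm input hook
 * and the rtnl link ops, unwinding in reverse order if any step fails.
 */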
static int __init vti_init(void)
{
	int err;

	pr_info("IPv4 over IPSec tunneling driver\n");

	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_mode_tunnel_input_register(&vti_handler);
	if (err < 0) {
		unregister_pernet_device(&vti_net_ops);
		pr_info("vti init: can't register tunnel\n");
		return err;
	}

	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
	xfrm4_mode_tunnel_input_deregister(&vti_handler);
	unregister_pernet_device(&vti_net_ops);
	return err;
}

static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
	if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
		pr_info("vti close: can't deregister tunnel\n");

	unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");