// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */

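/*
 * Illustrative userspace setup (not part of the original source; the
 * iproute2 syntax below is an assumption and may vary by version):
 *
 *   ip link add vti0 type vti local 192.0.2.1 remote 192.0.2.2 \
 *           ikey 42 okey 42
 *   ip link set vti0 up
 *
 * The tunnel i_key/o_key are used as the skb mark, so the matching IPsec
 * states and policies are expected to carry the same mark (e.g. "mark 42"
 * on the ip xfrm state/policy entries).  Addresses and key values above
 * are placeholders.
 */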
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);

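/* Receive path: look up the VTI device by the outer IP addresses, record
 * the tunnel in the skb's XFRM control block for vti_rcv_cb(), and hand
 * the packet to the xfrm layer for decapsulation.
 */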
static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
		     int encap_type, bool update_skb_dev)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->saddr, iph->daddr, 0);
	if (tunnel) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

		if (update_skb_dev)
			skb->dev = tunnel->dev;

		return xfrm_input(skb, nexthdr, spi, encap_type);
	}

	return -EINVAL;
drop:
	kfree_skb(skb);
	return 0;
}

static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
			   int encap_type)
{
	return vti_input(skb, nexthdr, spi, encap_type, false);
}

static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
}

static int vti_rcv_proto(struct sk_buff *skb)
{
	return vti_rcv(skb, 0, false);
}

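/* Callback invoked by the xfrm layer after the inner packet has been
 * extracted: the policy check runs with skb->mark temporarily set to the
 * tunnel i_key, then the skb is scrubbed, re-parented to the VTI device
 * and accounted in its rx statistics.
 */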
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct xfrm_state *x;
	const struct xfrm_mode *inner_mode;
	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
	u32 orig_mark = skb->mark;
	int ret;

	if (!tunnel)
		return 1;

	dev = tunnel->dev;

	if (err) {
		DEV_STATS_INC(dev, rx_errors);
		DEV_STATS_INC(dev, rx_dropped);

		return 0;
	}

	x = xfrm_input_state(skb);

	inner_mode = &x->inner_mode;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);
			return -EINVAL;
		}
	}

	family = inner_mode->family;

	skb->mark = be32_to_cpu(tunnel->parms.i_key);
	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
	skb->mark = orig_mark;

	if (!ret)
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
	skb->dev = dev;
	dev_sw_netstats_rx_add(dev, skb->len);

	return 0;
}

static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)&dst;
	xfrm_address_t *saddr = (xfrm_address_t *)&src;

	/* If there is no transform, or the xfrm state is not in tunnel
	 * mode for IPv4, then this tunnel is not functional.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET)
		return false;

	if (!dst)
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
		return false;

	return true;
}

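/* Transmit path shared by IPv4 and IPv6 payloads: route the packet if no
 * dst is attached, run it through xfrm_lookup_route(), verify that the
 * selected state matches the tunnel endpoints (vti_state_check()), handle
 * PMTU, and finally emit it via dst_output() on behalf of tunnel->net.
 */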
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct flowi *fl)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parms = &tunnel->parms;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;	/* Device to other host */
	int pkt_len = skb->len;
	int err;
	int mtu;

	if (!dst) {
		switch (skb->protocol) {
		case htons(ETH_P_IP): {
			struct rtable *rt;

			fl->u.ip4.flowi4_oif = dev->ifindex;
			fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
			rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
			if (IS_ERR(rt)) {
				DEV_STATS_INC(dev, tx_carrier_errors);
				goto tx_error_icmp;
			}
			dst = &rt->dst;
			skb_dst_set(skb, dst);
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			fl->u.ip6.flowi6_oif = dev->ifindex;
			fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
			dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
			if (dst->error) {
				dst_release(dst);
				dst = NULL;
				DEV_STATS_INC(dev, tx_carrier_errors);
				goto tx_error_icmp;
			}
			skb_dst_set(skb, dst);
			break;
#endif
		default:
			DEV_STATS_INC(dev, tx_carrier_errors);
			goto tx_error_icmp;
		}
	}

	dst_hold(dst);
	dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		DEV_STATS_INC(dev, tx_carrier_errors);
		goto tx_error_icmp;
	}

	if (dst->flags & DST_XFRM_QUEUE)
		goto xmit;

	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
		DEV_STATS_INC(dev, tx_carrier_errors);
		dst_release(dst);
		goto tx_error_icmp;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		dst_release(dst);
		DEV_STATS_INC(dev, collisions);
		goto tx_error;
	}

	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		skb_dst_update_pmtu_no_confirm(skb, mtu);
		if (skb->protocol == htons(ETH_P_IP)) {
			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
				goto xmit;
			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				      htonl(mtu));
		} else {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		dst_release(dst);
		goto tx_error;
	}

xmit:
	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(tunnel->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0)
		err = pkt_len;
	iptunnel_xmit_stats(dev, err);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct flowi fl;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		xfrm_decode_session(skb, &fl, AF_INET);
		break;
	case htons(ETH_P_IPV6):
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		xfrm_decode_session(skb, &fl, AF_INET6);
		break;
	default:
		goto tx_err;
	}

	/* override mark with tunnel output key */
	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);

	return vti_xmit(skb, dev, &fl);

tx_err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

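/* ICMP error handler: map the quoted ESP/AH/IPCOMP header back to an SPI,
 * look up the xfrm state keyed by the tunnel o_key (used as mark), and
 * propagate PMTU updates or redirects to it.
 */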
static int vti4_err(struct sk_buff *skb, u32 info)
{
	__be32 spi;
	__u32 mark;
	struct xfrm_state *x;
	struct ip_tunnel *tunnel;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	int protocol = iph->protocol;
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->daddr, iph->saddr, 0);
	if (!tunnel)
		return -1;

	mark = be32_to_cpu(tunnel->parms.o_key);

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, protocol);
	else
		ipv4_redirect(skb, net, 0, protocol);
	xfrm_state_put(x);

	return 0;
}

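/* ioctl backend (SIOCADDTUNNEL and friends): sanitise the user-supplied
 * parameters, force the VTI_ISVTI flag, and report GRE_KEY on the way out
 * so userspace sees the keys it configured.
 */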
static int
vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_IPIP ||
		    p->iph.ihl != 5)
			return -EINVAL;
	}

	if (!(p->i_flags & GRE_KEY))
		p->i_key = 0;
	if (!(p->o_flags & GRE_KEY))
		p->o_key = 0;

	p->i_flags = VTI_ISVTI;

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd != SIOCDELTUNNEL) {
		p->i_flags |= GRE_KEY;
		p->o_flags |= GRE_KEY;
	}
	return 0;
}

static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= ip_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_siocdevprivate = ip_tunnel_siocdevprivate,
	.ndo_change_mtu	= ip_tunnel_change_mtu,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
	.ndo_tunnel_ctl	= vti_tunnel_ctl,
};

static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->header_ops		= &ip_tunnel_header_ops;
	dev->type		= ARPHRD_TUNNEL;
	ip_tunnel_setup(dev, vti_net_id);
}

static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;
}

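/* Per-protocol xfrm handlers for ESP, AH and IPCOMP.  Priority 100 lets a
 * VTI device claim matching traffic ahead of lower-priority handlers.
 */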
static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

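/* Optional IPIP (and IPIP-over-IPv6) tunnel handlers: feed IPPROTO_IPIP
 * packets into the same vti_input() path, using the outer source address
 * in place of an SPI.
 */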
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
static int vti_rcv_tunnel(struct sk_buff *skb)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr, 0, false);
}

static struct xfrm_tunnel vti_ipip_handler __read_mostly = {
	.handler	=	vti_rcv_tunnel,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	0,
};

#if IS_ENABLED(CONFIG_IPV6)
static struct xfrm_tunnel vti_ipip6_handler __read_mostly = {
	.handler	=	vti_rcv_tunnel,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	0,
};
#endif
#endif

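/* Per-network-namespace setup: create the "ip_vti0" fallback device and
 * give its fallback tunnel an IPIP header template.
 */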
static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct ip_tunnel_net *itn;

	err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
	if (err)
		return err;
	itn = net_generic(net, vti_net_id);
	if (itn->fb_tunnel_dev)
		vti_fb_tunnel_init(itn->fb_tunnel_dev);
	return 0;
}

static void __net_exit vti_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
}

static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit_batch = vti_exit_batch_net,
	.id   = &vti_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	return 0;
}

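/* Translate IFLA_VTI_* netlink attributes into ip_tunnel_parm; absent
 * attributes leave the zero-initialised defaults in place.
 */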
static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms,
			      __u32 *fwmark)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	parms->i_flags = VTI_ISVTI;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);

	if (data[IFLA_VTI_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

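/* rtnl_link_ops callbacks: creation and reconfiguration both go through
 * vti_netlink_parms() and the generic ip_tunnel_newlink()/changelink()
 * helpers.
 */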
static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm parms;
	__u32 fwmark = 0;

	vti_netlink_parms(data, &parms, &fwmark);
	return ip_tunnel_newlink(dev, tb, &parms, fwmark);
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;

	vti_netlink_parms(data, &p, &fwmark);
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t vti_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(4) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(4) +
		/* IFLA_VTI_FWMARK */
		nla_total_size(4) +
		0;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_VTI_FWMARK]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.dellink        = ip_tunnel_dellink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

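/* Module init: register the pernet device, the ESP/AH/IPCOMP protocol
 * handlers, the optional ipip tunnel handlers and the rtnl link ops, and
 * unwind in reverse order on failure.
 */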
static int __init vti_init(void)
{
	const char *msg;
	int err;

	pr_info("IPv4 over IPsec tunneling driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "tunnel protocols";
	err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
	msg = "ipip tunnel";
	err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET);
	if (err < 0)
		goto xfrm_tunnel_ipip_failed;
#if IS_ENABLED(CONFIG_IPV6)
	err = xfrm4_tunnel_register(&vti_ipip6_handler, AF_INET6);
	if (err < 0)
		goto xfrm_tunnel_ipip6_failed;
#endif
#endif

	msg = "netlink interface";
	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
#if IS_ENABLED(CONFIG_IPV6)
	xfrm4_tunnel_deregister(&vti_ipip6_handler, AF_INET6);
xfrm_tunnel_ipip6_failed:
#endif
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
#endif
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
	pr_err("vti init: failed to register %s\n", msg);
	return err;
}

static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
#if IS_ENABLED(CONFIG_IPV6)
	xfrm4_tunnel_deregister(&vti_ipip6_handler, AF_INET6);
#endif
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET);
#endif
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
	unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");