xref: /openbmc/linux/net/ipv4/ip_tunnel_core.c (revision bc5aa3a0)
/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>

const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(iptun_encaps);

const struct ip6_tnl_encap_ops __rcu *
		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(ip6tun_encaps);

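/**
 * iptunnel_xmit - push and transmit the outer IPv4 header
 * @sk: transmitting socket, if any
 * @rt: route to the tunnel endpoint; provides the output device
 * @skb: packet already carrying the inner headers
 * @src: outer source address
 * @dst: outer destination address
 * @proto: outer IP protocol number
 * @tos: outer TOS byte
 * @ttl: outer TTL
 * @df: outer frag-off bits, e.g. htons(IP_DF) or 0
 * @xnet: true when the packet crosses network namespaces
 */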
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 proto,
		   __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct net_device *dev = skb->dev;
	int skb_iif = skb->skb_iif;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	if (skb_iif && !(df & htons(IP_DF))) {
		/* Arrived from an ingress interface, got encapsulated, with
		 * fragmentation of the encapsulating frames allowed.
		 * If the skb is GSO, the resulting encapsulated network
		 * segments may exceed the dst MTU.
		 * Allow IP fragmentation of such segments.
		 */
		IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
	}

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out(net, sk, skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	iptunnel_xmit_stats(dev, pkt_len);
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
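
/* A minimal transmit sketch for a hypothetical encapsulating driver:
 * prepare the offload state, then hand the packet to iptunnel_xmit()
 * together with the outer header fields. The GSO type and the header
 * values below are illustrative assumptions, not taken from a real
 * driver.
 *
 *	err = iptunnel_handle_offloads(skb, SKB_GSO_UDP_TUNNEL);
 *	if (err)
 *		goto tx_error;
 *	iptunnel_xmit(sk, rt, skb, fl4.saddr, fl4.daddr, IPPROTO_UDP,
 *		      tos, ttl, df, !net_eq(tunnel_net, dev_net(dev)));
 */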
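/**
 * __iptunnel_pull_header - strip the tunnel header on receive
 * @skb: received packet, positioned at the tunnel header
 * @hdr_len: length of the tunnel header to pull
 * @inner_proto: protocol of the encapsulated packet
 * @raw_proto: if false, an ETH_P_TEB payload has its Ethernet header parsed
 * @xnet: true when the packet crosses network namespaces
 *
 * Returns 0 on success or a negative error if the headers cannot be
 * pulled.
 */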
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	skb_clear_hash_if_not_l4(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);

	return iptunnel_pull_offloads(skb);
}
EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
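
/* Most callers use the iptunnel_pull_header() wrapper from
 * include/net/ip_tunnels.h, which fixes raw_proto to false. A sketch
 * of that wrapper, for reference:
 *
 *	static inline int iptunnel_pull_header(struct sk_buff *skb,
 *					       int hdr_len,
 *					       __be16 inner_proto, bool xnet)
 *	{
 *		return __iptunnel_pull_header(skb, hdr_len, inner_proto,
 *					      false, xnet);
 *	}
 */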
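/**
 * iptunnel_metadata_reply - build TX metadata for answering an RX tunnel
 * @md: metadata dst of the received packet
 * @flags: allocation flags
 *
 * Allocates a new metadata dst whose destination is the source of @md,
 * suitable for transmitting a reply over the same tunnel. Returns NULL
 * if @md is absent, already marked for TX, or the allocation fails.
 */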
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags)
{
	struct metadata_dst *res;
	struct ip_tunnel_info *dst, *src;

	if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
		return NULL;

	res = metadata_dst_alloc(0, flags);
	if (!res)
		return NULL;

	dst = &res->u.tun_info;
	src = &md->u.tun_info;
	dst->key.tun_id = src->key.tun_id;
	if (src->mode & IP_TUNNEL_INFO_IPV6)
		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
		       sizeof(struct in6_addr));
	else
		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
	dst->mode = src->mode | IP_TUNNEL_INFO_TX;

	return res;
}
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
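/**
 * iptunnel_handle_offloads - prepare checksum/GSO state for encapsulation
 * @skb: packet about to be encapsulated
 * @gso_type_mask: SKB_GSO_* bits to add for this tunnel type
 *
 * Marks the inner headers, ORs @gso_type_mask into the GSO type of GSO
 * packets, and downgrades non-GSO packets that are not CHECKSUM_PARTIAL
 * to CHECKSUM_NONE. Returns 0 on success or a negative error.
 */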
int iptunnel_handle_offloads(struct sk_buff *skb,
			     int gso_type_mask)
{
	int err;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		err = skb_header_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type |= gso_type_mask;
		return 0;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
		/* We clear encapsulation here to prevent badly-written
		 * drivers potentially deciding to offload an inner checksum
		 * if we set CHECKSUM_PARTIAL on the outer header.
		 * This should go away when the drivers are all fixed.
		 */
		skb->encapsulation = 0;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);

/* Often-modified stats are per-CPU; others are shared (netdev->stats). */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	netdev_stats_to_stats64(tot, &dev->stats);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
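/* The LWTUNNEL_IP_* attributes below back the "encap ip" route option.
 * A hypothetical iproute2 invocation exercising them might look like
 * (id, addresses and device chosen purely for illustration):
 *
 *	ip route add 192.0.2.0/24 encap ip id 100 dst 198.51.100.2 \
 *		ttl 64 dev tun0
 */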
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
};

static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
			      unsigned int family, const void *cfg,
			      struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}

static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP_PAD) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}

static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP_ID */
		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
}

static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	return memcmp(lwt_tun_info(a), lwt_tun_info(b),
		      sizeof(struct ip_tunnel_info));
}

static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
	.build_state = ip_tun_build_state,
	.fill_encap = ip_tun_fill_encap_info,
	.get_encap_size = ip_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
};
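/* The IPv6 twin of the block above, reachable via "encap ip6" routes.
 * A hypothetical example (values purely for illustration):
 *
 *	ip route add 2001:db8:1::/64 encap ip6 id 100 dst 2001:db8::2 \
 *		dev tun0
 */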
static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
	[LWTUNNEL_IP6_ID]		= { .type = NLA_U64 },
	[LWTUNNEL_IP6_DST]		= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
};

static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP6;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP6_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);

	if (tb[LWTUNNEL_IP6_DST])
		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);

	if (tb[LWTUNNEL_IP6_SRC])
		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);

	if (tb[LWTUNNEL_IP6_HOPLIMIT])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);

	if (tb[LWTUNNEL_IP6_TC])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);

	if (tb[LWTUNNEL_IP6_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}

static int ip6_tun_fill_encap_info(struct sk_buff *skb,
				   struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP6_PAD) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}

static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP6_ID */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
		+ nla_total_size(2);	/* LWTUNNEL_IP6_FLAGS */
}

static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
	.build_state = ip6_tun_build_state,
	.fill_encap = ip6_tun_fill_encap_info,
	.get_encap_size = ip6_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
};
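/* Boot-time registration of the IPv4 and IPv6 lwtunnel encap ops. */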
void __init ip_tunnel_core_init(void)
{
	/* If you land here, check whether increasing ip_tunnel_info's
	 * options_len is still a reasonable choice given its usage in the
	 * front ends (e.g., it is part of the flow keys).
	 */
	BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);

	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
}
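/* Static key counting the users of metadata-based (collect_md) tunnels.
 * Fast paths elsewhere are expected to guard their metadata handling
 * with something like the following sketch (handle_metadata_dst() is
 * hypothetical):
 *
 *	if (static_key_false(&ip_tunnel_metadata_cnt))
 *		handle_metadata_dst(skb);
 */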
struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
	static_key_slow_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
	static_key_slow_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
445