xref: /openbmc/linux/net/ipv4/ip_tunnel_core.c (revision 4cb47a86)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2013 Nicira, Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/skbuff.h>
11 #include <linux/netdevice.h>
12 #include <linux/in.h>
13 #include <linux/if_arp.h>
14 #include <linux/init.h>
15 #include <linux/in6.h>
16 #include <linux/inetdevice.h>
17 #include <linux/netfilter_ipv4.h>
18 #include <linux/etherdevice.h>
19 #include <linux/if_ether.h>
20 #include <linux/if_vlan.h>
21 #include <linux/static_key.h>
22 
23 #include <net/ip.h>
24 #include <net/icmp.h>
25 #include <net/protocol.h>
26 #include <net/ip_tunnels.h>
27 #include <net/ip6_tunnel.h>
28 #include <net/arp.h>
29 #include <net/checksum.h>
30 #include <net/dsfield.h>
31 #include <net/inet_ecn.h>
32 #include <net/xfrm.h>
33 #include <net/net_namespace.h>
34 #include <net/netns/generic.h>
35 #include <net/rtnetlink.h>
36 #include <net/dst_metadata.h>
37 #include <net/geneve.h>
38 #include <net/vxlan.h>
39 #include <net/erspan.h>
40 
41 const struct ip_tunnel_encap_ops __rcu *
42 		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
43 EXPORT_SYMBOL(iptun_encaps);
44 
45 const struct ip6_tnl_encap_ops __rcu *
46 		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
47 EXPORT_SYMBOL(ip6tun_encaps);
48 
49 void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
50 		   __be32 src, __be32 dst, __u8 proto,
51 		   __u8 tos, __u8 ttl, __be16 df, bool xnet)
52 {
53 	int pkt_len = skb->len - skb_inner_network_offset(skb);
54 	struct net *net = dev_net(rt->dst.dev);
55 	struct net_device *dev = skb->dev;
56 	struct iphdr *iph;
57 	int err;
58 
59 	skb_scrub_packet(skb, xnet);
60 
61 	skb_clear_hash_if_not_l4(skb);
62 	skb_dst_set(skb, &rt->dst);
63 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
64 
65 	/* Push down and install the IP header. */
66 	skb_push(skb, sizeof(struct iphdr));
67 	skb_reset_network_header(skb);
68 
69 	iph = ip_hdr(skb);
70 
71 	iph->version	=	4;
72 	iph->ihl	=	sizeof(struct iphdr) >> 2;
73 	iph->frag_off	=	ip_mtu_locked(&rt->dst) ? 0 : df;
74 	iph->protocol	=	proto;
75 	iph->tos	=	tos;
76 	iph->daddr	=	dst;
77 	iph->saddr	=	src;
78 	iph->ttl	=	ttl;
79 	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
80 
81 	err = ip_local_out(net, sk, skb);
82 
83 	if (dev) {
84 		if (unlikely(net_xmit_eval(err)))
85 			pkt_len = 0;
86 		iptunnel_xmit_stats(dev, pkt_len);
87 	}
88 }
89 EXPORT_SYMBOL_GPL(iptunnel_xmit);
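
A userspace sketch of the header construction iptunnel_xmit() performs: fill a minimal outer IPv4 header the same way (version, ihl in 32-bit words, DF handling, addresses) and checksum it. Field names follow struct iphdr from glibc's <netinet/ip.h>; rfc1071_csum() stands in for the kernel's checksum and IP-ID machinery, and the addresses and GRE protocol choice are invented for illustration.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip.h>

/* RFC 1071 one's-complement checksum over an even number of bytes. */
static uint16_t rfc1071_csum(const void *buf, size_t len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *p++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[sizeof(struct iphdr) + 64] = { 0 };
	struct iphdr *iph = (struct iphdr *)pkt;

	iph->version  = 4;
	iph->ihl      = sizeof(*iph) >> 2;	/* 20 bytes -> 5 words */
	iph->tot_len  = htons(sizeof(pkt));
	iph->frag_off = htons(IP_DF);		/* like df above */
	iph->ttl      = 64;
	iph->protocol = IPPROTO_GRE;		/* outer protocol, e.g. GRE */
	iph->saddr    = inet_addr("192.0.2.1");
	iph->daddr    = inet_addr("198.51.100.2");
	iph->check    = rfc1071_csum(iph, sizeof(*iph));

	printf("ihl=%u tot_len=%u check=0x%04x\n",
	       iph->ihl, ntohs(iph->tot_len), ntohs(iph->check));
	return 0;
}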
90 
91 int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
92 			   __be16 inner_proto, bool raw_proto, bool xnet)
93 {
94 	if (unlikely(!pskb_may_pull(skb, hdr_len)))
95 		return -ENOMEM;
96 
97 	skb_pull_rcsum(skb, hdr_len);
98 
99 	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
100 		struct ethhdr *eh;
101 
102 		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
103 			return -ENOMEM;
104 
105 		eh = (struct ethhdr *)skb->data;
106 		if (likely(eth_proto_is_802_3(eh->h_proto)))
107 			skb->protocol = eh->h_proto;
108 		else
109 			skb->protocol = htons(ETH_P_802_2);
110 
111 	} else {
112 		skb->protocol = inner_proto;
113 	}
114 
115 	skb_clear_hash_if_not_l4(skb);
116 	__vlan_hwaccel_clear_tag(skb);
117 	skb_set_queue_mapping(skb, 0);
118 	skb_scrub_packet(skb, xnet);
119 
120 	return iptunnel_pull_offloads(skb);
121 }
122 EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
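
A userspace illustration of the inner-protocol selection above: for ETH_P_TEB (transparent Ethernet bridging) tunnels the payload is a full Ethernet frame, so skb->protocol is taken from the inner Ethernet header unless that field is an 802.3-style length. The kernel's eth_proto_is_802_3() is essentially the ntohs() >= ETH_P_802_3_MIN comparison used in this sketch.

#include <stdio.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

/* h_proto is big-endian, exactly as read from the inner frame. */
static unsigned short pick_protocol(unsigned short h_proto)
{
	/* Values >= 0x0600 are EtherTypes; smaller values are lengths. */
	if (ntohs(h_proto) >= ETH_P_802_3_MIN)
		return h_proto;
	return htons(ETH_P_802_2);
}

int main(void)
{
	printf("0x0800 -> 0x%04x\n", ntohs(pick_protocol(htons(ETH_P_IP))));
	printf("0x0100 -> 0x%04x\n", ntohs(pick_protocol(htons(0x0100))));
	return 0;
}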
123 
124 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
125 					     gfp_t flags)
126 {
127 	struct metadata_dst *res;
128 	struct ip_tunnel_info *dst, *src;
129 
130 	if (!md || md->type != METADATA_IP_TUNNEL ||
131 	    md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
132 		return NULL;
133 
134 	src = &md->u.tun_info;
135 	res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags);
136 	if (!res)
137 		return NULL;
138 
139 	dst = &res->u.tun_info;
140 	dst->key.tun_id = src->key.tun_id;
141 	if (src->mode & IP_TUNNEL_INFO_IPV6)
142 		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
143 		       sizeof(struct in6_addr));
144 	else
145 		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
146 	dst->key.tun_flags = src->key.tun_flags;
147 	dst->mode = src->mode | IP_TUNNEL_INFO_TX;
148 	ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
149 				src->options_len, 0);
150 
151 	return res;
152 }
153 EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
154 
155 int iptunnel_handle_offloads(struct sk_buff *skb,
156 			     int gso_type_mask)
157 {
158 	int err;
159 
160 	if (likely(!skb->encapsulation)) {
161 		skb_reset_inner_headers(skb);
162 		skb->encapsulation = 1;
163 	}
164 
165 	if (skb_is_gso(skb)) {
166 		err = skb_header_unclone(skb, GFP_ATOMIC);
167 		if (unlikely(err))
168 			return err;
169 		skb_shinfo(skb)->gso_type |= gso_type_mask;
170 		return 0;
171 	}
172 
173 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
174 		skb->ip_summed = CHECKSUM_NONE;
175 		/* We clear encapsulation here to prevent badly-written
176 		 * drivers potentially deciding to offload an inner checksum
177 		 * if we set CHECKSUM_PARTIAL on the outer header.
178 		 * This should go away when the drivers are all fixed.
179 		 */
180 		skb->encapsulation = 0;
181 	}
182 
183 	return 0;
184 }
185 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
186 
187 /**
188  * iptunnel_pmtud_build_icmp() - Build ICMP error message for PMTUD
189  * @skb:	Original packet with L2 header
190  * @mtu:	MTU value for ICMP error
191  *
192  * Return: length on success, negative error code if message couldn't be built.
193  */
194 static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
195 {
196 	const struct iphdr *iph = ip_hdr(skb);
197 	struct icmphdr *icmph;
198 	struct iphdr *niph;
199 	struct ethhdr eh;
200 	int len, err;
201 
202 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
203 		return -EINVAL;
204 
205 	skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
206 	pskb_pull(skb, ETH_HLEN);
207 	skb_reset_network_header(skb);
208 
209 	err = pskb_trim(skb, 576 - sizeof(*niph) - sizeof(*icmph));
210 	if (err)
211 		return err;
212 
213 	len = skb->len + sizeof(*icmph);
214 	err = skb_cow(skb, sizeof(*niph) + sizeof(*icmph) + ETH_HLEN);
215 	if (err)
216 		return err;
217 
218 	icmph = skb_push(skb, sizeof(*icmph));
219 	*icmph = (struct icmphdr) {
220 		.type			= ICMP_DEST_UNREACH,
221 		.code			= ICMP_FRAG_NEEDED,
222 		.checksum		= 0,
223 		.un.frag.__unused	= 0,
224 		.un.frag.mtu		= htons(mtu),
225 	};
226 	icmph->checksum = ip_compute_csum(icmph, len);
227 	skb_reset_transport_header(skb);
228 
229 	niph = skb_push(skb, sizeof(*niph));
230 	*niph = (struct iphdr) {
231 		.ihl			= sizeof(*niph) / 4u,
232 		.version		= 4,
233 		.tos			= 0,
234 		.tot_len		= htons(len + sizeof(*niph)),
235 		.id			= 0,
236 		.frag_off		= htons(IP_DF),
237 		.ttl			= iph->ttl,
238 		.protocol		= IPPROTO_ICMP,
239 		.saddr			= iph->daddr,
240 		.daddr			= iph->saddr,
241 	};
242 	ip_send_check(niph);
243 	skb_reset_network_header(skb);
244 
245 	skb->ip_summed = CHECKSUM_NONE;
246 
247 	eth_header(skb, skb->dev, ntohs(eh.h_proto), eh.h_source, eh.h_dest, 0);
248 	skb_reset_mac_header(skb);
249 
250 	return skb->len;
251 }
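
The pskb_trim() budget above comes from RFC 1122's 576-octet minimum reassembly size: the quoted payload is capped at 576 - sizeof(iphdr) - sizeof(icmphdr) = 528 bytes so the whole error fits in a datagram every host must accept. A hedged userspace sketch of the same ICMP construction, using glibc's struct icmphdr and an inline RFC 1071 checksum (buffer sizes and helper names are invented for the example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/ip_icmp.h>

static uint16_t csum16(const void *buf, size_t len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *p++;
	if (len)
		sum += *(const uint8_t *)p;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Build "fragmentation needed" quoting up to 528 bytes of the packet. */
static size_t build_frag_needed(uint8_t *buf, uint16_t mtu,
				const uint8_t *orig, size_t orig_len)
{
	struct icmphdr *icmph = (struct icmphdr *)buf;
	size_t quoted = orig_len > 528 ? 528 : orig_len;

	memset(icmph, 0, sizeof(*icmph));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.frag.mtu = htons(mtu);	/* big-endian on the wire */
	memcpy(buf + sizeof(*icmph), orig, quoted);
	icmph->checksum = csum16(buf, sizeof(*icmph) + quoted);
	return sizeof(*icmph) + quoted;
}

int main(void)
{
	uint8_t out[8 + 528], inner[600] = { 0x45 };

	printf("%zu bytes\n",
	       build_frag_needed(out, 1400, inner, sizeof(inner)));
	return 0;
}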
252 
253 /**
254  * iptunnel_pmtud_check_icmp() - Trigger ICMP reply if needed and allowed
255  * @skb:	Buffer being sent by encapsulation, L2 headers expected
256  * @mtu:	Network MTU for path
257  *
258  * Return: 0 for no ICMP reply, length if built, negative value on error.
259  */
260 static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu)
261 {
262 	const struct icmphdr *icmph = icmp_hdr(skb);
263 	const struct iphdr *iph = ip_hdr(skb);
264 
265 	if (mtu < 576 || iph->frag_off != htons(IP_DF))
266 		return 0;
267 
268 	if (ipv4_is_lbcast(iph->daddr)  || ipv4_is_multicast(iph->daddr) ||
269 	    ipv4_is_zeronet(iph->saddr) || ipv4_is_loopback(iph->saddr)  ||
270 	    ipv4_is_lbcast(iph->saddr)  || ipv4_is_multicast(iph->saddr))
271 		return 0;
272 
273 	if (iph->protocol == IPPROTO_ICMP && icmp_is_err(icmph->type))
274 		return 0;
275 
276 	return iptunnel_pmtud_build_icmp(skb, mtu);
277 }
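
The ipv4_is_*() guards reject packets whose quoted addresses could turn the generated error into garbage or an amplification vector: broadcast or multicast destinations, and zeronet, loopback, broadcast or multicast sources (which would become the error's destination). They are simple prefix tests; a host-order userspace equivalent:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Host-order variants; the kernel's helpers take __be32 instead. */
static int is_multicast(uint32_t a) { return (a & 0xf0000000) == 0xe0000000; }
static int is_lbcast(uint32_t a)    { return a == 0xffffffff; }
static int is_zeronet(uint32_t a)   { return (a & 0xff000000) == 0; }
static int is_loopback(uint32_t a)  { return (a & 0xff000000) == 0x7f000000; }

int main(void)
{
	printf("127.0.0.1 loopback? %d\n",
	       is_loopback(ntohl(inet_addr("127.0.0.1"))));
	printf("224.0.0.1 multicast? %d\n",
	       is_multicast(ntohl(inet_addr("224.0.0.1"))));
	printf("255.255.255.255 lbcast? %d\n",
	       is_lbcast(ntohl(inet_addr("255.255.255.255"))));
	printf("0.0.0.1 zeronet? %d\n",
	       is_zeronet(ntohl(inet_addr("0.0.0.1"))));
	return 0;
}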
278 
279 #if IS_ENABLED(CONFIG_IPV6)
280 /**
281  * iptunnel_pmtud_build_icmpv6() - Build ICMPv6 error message for PMTUD
282  * @skb:	Original packet with L2 header
283  * @mtu:	MTU value for ICMPv6 error
284  *
285  * Return: length on success, negative error code if message couldn't be built.
286  */
287 static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
288 {
289 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
290 	struct icmp6hdr *icmp6h;
291 	struct ipv6hdr *nip6h;
292 	struct ethhdr eh;
293 	int len, err;
294 	__wsum csum;
295 
296 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
297 		return -EINVAL;
298 
299 	skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
300 	pskb_pull(skb, ETH_HLEN);
301 	skb_reset_network_header(skb);
302 
303 	err = pskb_trim(skb, IPV6_MIN_MTU - sizeof(*nip6h) - sizeof(*icmp6h));
304 	if (err)
305 		return err;
306 
307 	len = skb->len + sizeof(*icmp6h);
308 	err = skb_cow(skb, sizeof(*nip6h) + sizeof(*icmp6h) + ETH_HLEN);
309 	if (err)
310 		return err;
311 
312 	icmp6h = skb_push(skb, sizeof(*icmp6h));
313 	*icmp6h = (struct icmp6hdr) {
314 		.icmp6_type		= ICMPV6_PKT_TOOBIG,
315 		.icmp6_code		= 0,
316 		.icmp6_cksum		= 0,
317 		.icmp6_mtu		= htonl(mtu),
318 	};
319 	skb_reset_transport_header(skb);
320 
321 	nip6h = skb_push(skb, sizeof(*nip6h));
322 	*nip6h = (struct ipv6hdr) {
323 		.priority		= 0,
324 		.version		= 6,
325 		.flow_lbl		= { 0 },
326 		.payload_len		= htons(len),
327 		.nexthdr		= IPPROTO_ICMPV6,
328 		.hop_limit		= ip6h->hop_limit,
329 		.saddr			= ip6h->daddr,
330 		.daddr			= ip6h->saddr,
331 	};
332 	skb_reset_network_header(skb);
333 
334 	csum = csum_partial(icmp6h, len, 0);
335 	icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
336 					      IPPROTO_ICMPV6, csum);
337 
338 	skb->ip_summed = CHECKSUM_NONE;
339 
340 	eth_header(skb, skb->dev, ntohs(eh.h_proto), eh.h_source, eh.h_dest, 0);
341 	skb_reset_mac_header(skb);
342 
343 	return skb->len;
344 }
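
csum_ipv6_magic() above folds the IPv6 pseudo-header of RFC 8200 section 8.1 (source address, destination address, 32-bit upper-layer length, three zero bytes, next header) into the ICMPv6 checksum. A userspace sketch of the same computation; it assumes even lengths, which holds for the fixed-size headers involved here:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static uint32_t sum16(const void *buf, size_t len, uint32_t sum)
{
	const uint16_t *p = buf;

	for (; len > 1; len -= 2)		/* len must be even */
		sum += *p++;
	return sum;
}

static uint16_t icmpv6_csum(const struct in6_addr *src,
			    const struct in6_addr *dst,
			    const void *icmp, uint32_t len)
{
	uint32_t be_len = htonl(len);
	uint32_t be_nh = htonl(IPPROTO_ICMPV6);	/* 00 00 00 3a */
	uint32_t sum = 0;

	sum = sum16(src, sizeof(*src), sum);
	sum = sum16(dst, sizeof(*dst), sum);
	sum = sum16(&be_len, sizeof(be_len), sum);
	sum = sum16(&be_nh, sizeof(be_nh), sum);
	sum = sum16(icmp, len, sum);	/* checksum field must be zero */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct in6_addr src, dst;
	uint8_t ptb[8] = { 2, 0, 0, 0, 0, 0, 5, 0 }; /* Packet Too Big stub */

	inet_pton(AF_INET6, "2001:db8::1", &src);
	inet_pton(AF_INET6, "2001:db8::2", &dst);
	printf("cksum=0x%04x\n", icmpv6_csum(&src, &dst, ptb, sizeof(ptb)));
	return 0;
}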
345 
346 /**
347  * iptunnel_pmtud_check_icmpv6() - Trigger ICMPv6 reply if needed and allowed
348  * @skb:	Buffer being sent by encapsulation, L2 headers expected
349  * @mtu:	Network MTU for path
350  *
351  * Return: 0 for no ICMPv6 reply, length if built, negative value on error.
352  */
353 static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu)
354 {
355 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
356 	int stype = ipv6_addr_type(&ip6h->saddr);
357 	u8 proto = ip6h->nexthdr;
358 	__be16 frag_off;
359 	int offset;
360 
361 	if (mtu < IPV6_MIN_MTU)
362 		return 0;
363 
364 	if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST ||
365 	    stype == IPV6_ADDR_LOOPBACK)
366 		return 0;
367 
368 	offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto,
369 				  &frag_off);
370 	if (offset < 0 || (frag_off & htons(~0x7)))
371 		return 0;
372 
373 	if (proto == IPPROTO_ICMPV6) {
374 		struct icmp6hdr *icmp6h;
375 
376 		if (!pskb_may_pull(skb, skb_network_header(skb) +
377 					offset + 1 - skb->data))
378 			return 0;
379 
380 		icmp6h = (struct icmp6hdr *)(skb_network_header(skb) + offset);
381 		if (icmpv6_is_err(icmp6h->icmp6_type) ||
382 		    icmp6h->icmp6_type == NDISC_REDIRECT)
383 			return 0;
384 	}
385 
386 	return iptunnel_pmtud_build_icmpv6(skb, mtu);
387 }
388 #endif /* IS_ENABLED(CONFIG_IPV6) */
389 
390 /**
391  * skb_tunnel_check_pmtu() - Check, update PMTU and trigger ICMP reply as needed
392  * @skb:	Buffer being sent by encapsulation, L2 headers expected
393  * @encap_dst:	Destination for tunnel encapsulation (outer IP)
394  * @headroom:	Encapsulation header size, bytes
395  * @reply:	Build matching ICMP or ICMPv6 message as a result
396  *
397  * L2 tunnel implementations that can carry IP and can be directly bridged
398  * (currently UDP tunnels) can't always rely on IP forwarding paths to handle
399  * PMTU discovery. In the bridged case, ICMP or ICMPv6 messages need to be built
400  * based on payload and sent back by the encapsulation itself.
401  *
402  * For routable interfaces, we just need to update the PMTU for the destination.
403  *
404  * Return: 0 if ICMP error not needed, length if built, negative value on error
405  */
406 int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
407 			  int headroom, bool reply)
408 {
409 	u32 mtu = dst_mtu(encap_dst) - headroom;
410 
411 	if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) ||
412 	    (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu))
413 		return 0;
414 
415 	skb_dst_update_pmtu_no_confirm(skb, mtu);
416 
417 	if (!reply || skb->pkt_type == PACKET_HOST)
418 		return 0;
419 
420 	if (skb->protocol == htons(ETH_P_IP))
421 		return iptunnel_pmtud_check_icmp(skb, mtu);
422 
423 #if IS_ENABLED(CONFIG_IPV6)
424 	if (skb->protocol == htons(ETH_P_IPV6))
425 		return iptunnel_pmtud_check_icmpv6(skb, mtu);
426 #endif
427 	return 0;
428 }
429 EXPORT_SYMBOL(skb_tunnel_check_pmtu);
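
The headroom argument is the full encapsulation overhead, so the usable inner MTU is dst_mtu(encap_dst) - headroom. As a worked example (header sizes are the standard ones, not taken from this file), a VXLAN tunnel over IPv4 carries the inner Ethernet frame in UDP, and the familiar 50-byte MTU reduction falls out directly:

#include <stdio.h>

int main(void)
{
	int path_mtu = 1500;
	int headroom = 14 /* inner Ethernet */ + 20 /* outer IPv4 */ +
		       8 /* outer UDP */ + 8 /* VXLAN header */;

	printf("inner MTU = %d\n", path_mtu - headroom);	/* 1450 */
	return 0;
}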
430 
431 /* Often-modified stats are per-CPU; others are shared (netdev->stats). */
432 void ip_tunnel_get_stats64(struct net_device *dev,
433 			   struct rtnl_link_stats64 *tot)
434 {
435 	int i;
436 
437 	netdev_stats_to_stats64(tot, &dev->stats);
438 
439 	for_each_possible_cpu(i) {
440 		const struct pcpu_sw_netstats *tstats =
441 						   per_cpu_ptr(dev->tstats, i);
442 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
443 		unsigned int start;
444 
445 		do {
446 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
447 			rx_packets = tstats->rx_packets;
448 			tx_packets = tstats->tx_packets;
449 			rx_bytes = tstats->rx_bytes;
450 			tx_bytes = tstats->tx_bytes;
451 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
452 
453 		tot->rx_packets += rx_packets;
454 		tot->tx_packets += tx_packets;
455 		tot->rx_bytes   += rx_bytes;
456 		tot->tx_bytes   += tx_bytes;
457 	}
458 }
459 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
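
The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair above is a seqcount read section: on 32-bit machines the 64-bit counters cannot be read atomically, so the reader retries until no writer interleaved. A minimal sketch of the retry idiom in plain C11 atomics (single-threaded here, and deliberately ignoring the memory-barrier details the kernel primitives handle; an odd sequence number means a writer is mid-update):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_stats {
	atomic_uint seq;
	uint64_t rx_bytes, tx_bytes;
};

static void stats_read(struct cpu_stats *s, uint64_t *rx, uint64_t *tx)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			;			/* writer in progress */
		*rx = s->rx_bytes;
		*tx = s->tx_bytes;
	} while (atomic_load(&s->seq) != start);
}

int main(void)
{
	struct cpu_stats s = { .rx_bytes = 1500, .tx_bytes = 9000 };
	uint64_t rx, tx;

	stats_read(&s, &rx, &tx);
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)rx, (unsigned long long)tx);
	return 0;
}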
460 
461 static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
462 	[LWTUNNEL_IP_UNSPEC]	= { .strict_start_type = LWTUNNEL_IP_OPTS },
463 	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
464 	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
465 	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
466 	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
467 	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
468 	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
469 	[LWTUNNEL_IP_OPTS]	= { .type = NLA_NESTED },
470 };
471 
472 static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = {
473 	[LWTUNNEL_IP_OPTS_GENEVE]	= { .type = NLA_NESTED },
474 	[LWTUNNEL_IP_OPTS_VXLAN]	= { .type = NLA_NESTED },
475 	[LWTUNNEL_IP_OPTS_ERSPAN]	= { .type = NLA_NESTED },
476 };
477 
478 static const struct nla_policy
479 geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
480 	[LWTUNNEL_IP_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
481 	[LWTUNNEL_IP_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
482 	[LWTUNNEL_IP_OPT_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
483 };
484 
485 static const struct nla_policy
486 vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = {
487 	[LWTUNNEL_IP_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
488 };
489 
490 static const struct nla_policy
491 erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = {
492 	[LWTUNNEL_IP_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
493 	[LWTUNNEL_IP_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
494 	[LWTUNNEL_IP_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
495 	[LWTUNNEL_IP_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
496 };
497 
498 static int ip_tun_parse_opts_geneve(struct nlattr *attr,
499 				    struct ip_tunnel_info *info, int opts_len,
500 				    struct netlink_ext_ack *extack)
501 {
502 	struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1];
503 	int data_len, err;
504 
505 	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr,
506 			       geneve_opt_policy, extack);
507 	if (err)
508 		return err;
509 
510 	if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] ||
511 	    !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] ||
512 	    !tb[LWTUNNEL_IP_OPT_GENEVE_DATA])
513 		return -EINVAL;
514 
515 	attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA];
516 	data_len = nla_len(attr);
517 	if (data_len % 4)
518 		return -EINVAL;
519 
520 	if (info) {
521 		struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len;
522 
523 		memcpy(opt->opt_data, nla_data(attr), data_len);
524 		opt->length = data_len / 4;
525 		attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS];
526 		opt->opt_class = nla_get_be16(attr);
527 		attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
528 		opt->type = nla_get_u8(attr);
529 		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
530 	}
531 
532 	return sizeof(struct geneve_opt) + data_len;
533 }
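
Geneve option accounting in the parser above: DATA must be a whole number of 4-byte words, opt->length stores that word count, and the space consumed inside tun_info is the option header plus the data. The same arithmetic in userspace, with an illustrative header layout after RFC 8926 section 3.5 (not the kernel's struct geneve_opt):

#include <stdio.h>
#include <stdint.h>

struct geneve_opt_hdr {
	uint16_t opt_class;
	uint8_t type;
	uint8_t flags_length;	/* low 5 bits: data length in words */
};

static int geneve_opt_size(int data_len)
{
	if (data_len % 4)
		return -1;	/* rejected, like the -EINVAL above */
	return (int)sizeof(struct geneve_opt_hdr) + data_len;
}

int main(void)
{
	printf("8-byte payload -> %d bytes, length field %d\n",
	       geneve_opt_size(8), 8 / 4);
	printf("6-byte payload -> %d\n", geneve_opt_size(6));
	return 0;
}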
534 
535 static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
536 				   struct ip_tunnel_info *info, int opts_len,
537 				   struct netlink_ext_ack *extack)
538 {
539 	struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1];
540 	int err;
541 
542 	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr,
543 			       vxlan_opt_policy, extack);
544 	if (err)
545 		return err;
546 
547 	if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP])
548 		return -EINVAL;
549 
550 	if (info) {
551 		struct vxlan_metadata *md =
552 			ip_tunnel_info_opts(info) + opts_len;
553 
554 		attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
555 		md->gbp = nla_get_u32(attr);
556 		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
557 	}
558 
559 	return sizeof(struct vxlan_metadata);
560 }
561 
562 static int ip_tun_parse_opts_erspan(struct nlattr *attr,
563 				    struct ip_tunnel_info *info, int opts_len,
564 				    struct netlink_ext_ack *extack)
565 {
566 	struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1];
567 	int err;
568 	u8 ver;
569 
570 	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr,
571 			       erspan_opt_policy, extack);
572 	if (err)
573 		return err;
574 
575 	if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER])
576 		return -EINVAL;
577 
578 	ver = nla_get_u8(tb[LWTUNNEL_IP_OPT_ERSPAN_VER]);
579 	if (ver == 1) {
580 		if (!tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX])
581 			return -EINVAL;
582 	} else if (ver == 2) {
583 		if (!tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] ||
584 		    !tb[LWTUNNEL_IP_OPT_ERSPAN_HWID])
585 			return -EINVAL;
586 	} else {
587 		return -EINVAL;
588 	}
589 
590 	if (info) {
591 		struct erspan_metadata *md =
592 			ip_tunnel_info_opts(info) + opts_len;
593 
594 		md->version = ver;
595 		if (ver == 1) {
596 			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX];
597 			md->u.index = nla_get_be32(attr);
598 		} else {
599 			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR];
600 			md->u.md2.dir = nla_get_u8(attr);
601 			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID];
602 			set_hwid(&md->u.md2, nla_get_u8(attr));
603 		}
604 
605 		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
606 	}
607 
608 	return sizeof(struct erspan_metadata);
609 }
610 
611 static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
612 			     struct netlink_ext_ack *extack)
613 {
614 	int err, rem, opt_len, opts_len = 0, type = 0;
615 	struct nlattr *nla;
616 
617 	if (!attr)
618 		return 0;
619 
620 	err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX,
621 			   ip_opts_policy, extack);
622 	if (err)
623 		return err;
624 
625 	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
626 		switch (nla_type(nla)) {
627 		case LWTUNNEL_IP_OPTS_GENEVE:
628 			if (type && type != TUNNEL_GENEVE_OPT)
629 				return -EINVAL;
630 			opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
631 							   extack);
632 			if (opt_len < 0)
633 				return opt_len;
634 			opts_len += opt_len;
635 			if (opts_len > IP_TUNNEL_OPTS_MAX)
636 				return -EINVAL;
637 			type = TUNNEL_GENEVE_OPT;
638 			break;
639 		case LWTUNNEL_IP_OPTS_VXLAN:
640 			if (type)
641 				return -EINVAL;
642 			opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len,
643 							  extack);
644 			if (opt_len < 0)
645 				return opt_len;
646 			opts_len += opt_len;
647 			type = TUNNEL_VXLAN_OPT;
648 			break;
649 		case LWTUNNEL_IP_OPTS_ERSPAN:
650 			if (type)
651 				return -EINVAL;
652 			opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len,
653 							   extack);
654 			if (opt_len < 0)
655 				return opt_len;
656 			opts_len += opt_len;
657 			type = TUNNEL_ERSPAN_OPT;
658 			break;
659 		default:
660 			return -EINVAL;
661 		}
662 	}
663 
664 	return opts_len;
665 }
666 
667 static int ip_tun_get_optlen(struct nlattr *attr,
668 			     struct netlink_ext_ack *extack)
669 {
670 	return ip_tun_parse_opts(attr, NULL, extack);
671 }
672 
673 static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
674 			   struct netlink_ext_ack *extack)
675 {
676 	return ip_tun_parse_opts(attr, info, extack);
677 }
678 
679 static int ip_tun_build_state(struct net *net, struct nlattr *attr,
680 			      unsigned int family, const void *cfg,
681 			      struct lwtunnel_state **ts,
682 			      struct netlink_ext_ack *extack)
683 {
684 	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
685 	struct lwtunnel_state *new_state;
686 	struct ip_tunnel_info *tun_info;
687 	int err, opt_len;
688 
689 	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
690 					  ip_tun_policy, extack);
691 	if (err < 0)
692 		return err;
693 
694 	opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack);
695 	if (opt_len < 0)
696 		return opt_len;
697 
698 	new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
699 	if (!new_state)
700 		return -ENOMEM;
701 
702 	new_state->type = LWTUNNEL_ENCAP_IP;
703 
704 	tun_info = lwt_tun_info(new_state);
705 
706 	err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack);
707 	if (err < 0) {
708 		lwtstate_free(new_state);
709 		return err;
710 	}
711 
712 #ifdef CONFIG_DST_CACHE
713 	err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
714 	if (err) {
715 		lwtstate_free(new_state);
716 		return err;
717 	}
718 #endif
719 
720 	if (tb[LWTUNNEL_IP_ID])
721 		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
722 
723 	if (tb[LWTUNNEL_IP_DST])
724 		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);
725 
726 	if (tb[LWTUNNEL_IP_SRC])
727 		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);
728 
729 	if (tb[LWTUNNEL_IP_TTL])
730 		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
731 
732 	if (tb[LWTUNNEL_IP_TOS])
733 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
734 
735 	if (tb[LWTUNNEL_IP_FLAGS])
736 		tun_info->key.tun_flags |=
737 				(nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
738 				 ~TUNNEL_OPTIONS_PRESENT);
739 
740 	tun_info->mode = IP_TUNNEL_INFO_TX;
741 	tun_info->options_len = opt_len;
742 
743 	*ts = new_state;
744 
745 	return 0;
746 }
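
This build_state path is what backs the "encap ip" lightweight-tunnel attribute on routes: iproute2 encodes the LWTUNNEL_IP_* attributes parsed above from a command of roughly this shape (device name and addresses invented, and assuming a metadata-mode tunnel device):

    ip route add 10.1.0.0/24 encap ip id 100 dst 203.0.113.2 ttl 64 dev geneve0

The resulting ip_tunnel_info is marked IP_TUNNEL_INFO_TX and attached to matching skbs as their lwtunnel state, where a metadata-based tunnel driver picks it up at transmit time.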
747 
748 static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
749 {
750 #ifdef CONFIG_DST_CACHE
751 	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
752 
753 	dst_cache_destroy(&tun_info->dst_cache);
754 #endif
755 }
756 
757 static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb,
758 					 struct ip_tunnel_info *tun_info)
759 {
760 	struct geneve_opt *opt;
761 	struct nlattr *nest;
762 	int offset = 0;
763 
764 	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
765 	if (!nest)
766 		return -ENOMEM;
767 
768 	while (tun_info->options_len > offset) {
769 		opt = ip_tunnel_info_opts(tun_info) + offset;
770 		if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS,
771 				 opt->opt_class) ||
772 		    nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) ||
773 		    nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4,
774 			    opt->opt_data)) {
775 			nla_nest_cancel(skb, nest);
776 			return -ENOMEM;
777 		}
778 		offset += sizeof(*opt) + opt->length * 4;
779 	}
780 
781 	nla_nest_end(skb, nest);
782 	return 0;
783 }
784 
785 static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb,
786 					struct ip_tunnel_info *tun_info)
787 {
788 	struct vxlan_metadata *md;
789 	struct nlattr *nest;
790 
791 	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN);
792 	if (!nest)
793 		return -ENOMEM;
794 
795 	md = ip_tunnel_info_opts(tun_info);
796 	if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) {
797 		nla_nest_cancel(skb, nest);
798 		return -ENOMEM;
799 	}
800 
801 	nla_nest_end(skb, nest);
802 	return 0;
803 }
804 
805 static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb,
806 					 struct ip_tunnel_info *tun_info)
807 {
808 	struct erspan_metadata *md;
809 	struct nlattr *nest;
810 
811 	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN);
812 	if (!nest)
813 		return -ENOMEM;
814 
815 	md = ip_tunnel_info_opts(tun_info);
816 	if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version))
817 		goto err;
818 
819 	if (md->version == 1 &&
820 	    nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index))
821 		goto err;
822 
823 	if (md->version == 2 &&
824 	    (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) ||
825 	     nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID,
826 			get_hwid(&md->u.md2))))
827 		goto err;
828 
829 	nla_nest_end(skb, nest);
830 	return 0;
831 err:
832 	nla_nest_cancel(skb, nest);
833 	return -ENOMEM;
834 }
835 
836 static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
837 				  struct ip_tunnel_info *tun_info)
838 {
839 	struct nlattr *nest;
840 	int err = 0;
841 
842 	if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
843 		return 0;
844 
845 	nest = nla_nest_start_noflag(skb, type);
846 	if (!nest)
847 		return -ENOMEM;
848 
849 	if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
850 		err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
851 	else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
852 		err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
853 	else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
854 		err = ip_tun_fill_encap_opts_erspan(skb, tun_info);
855 
856 	if (err) {
857 		nla_nest_cancel(skb, nest);
858 		return err;
859 	}
860 
861 	nla_nest_end(skb, nest);
862 	return 0;
863 }
864 
865 static int ip_tun_fill_encap_info(struct sk_buff *skb,
866 				  struct lwtunnel_state *lwtstate)
867 {
868 	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
869 
870 	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
871 			 LWTUNNEL_IP_PAD) ||
872 	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
873 	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
874 	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
875 	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
876 	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
877 	    ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
878 		return -ENOMEM;
879 
880 	return 0;
881 }
882 
883 static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
884 {
885 	int opt_len;
886 
887 	if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
888 		return 0;
889 
890 	opt_len = nla_total_size(0);		/* LWTUNNEL_IP_OPTS */
891 	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
892 		struct geneve_opt *opt;
893 		int offset = 0;
894 
895 		opt_len += nla_total_size(0);	/* LWTUNNEL_IP_OPTS_GENEVE */
896 		while (info->options_len > offset) {
897 			opt = ip_tunnel_info_opts(info) + offset;
898 			opt_len += nla_total_size(2)	/* OPT_GENEVE_CLASS */
899 				   + nla_total_size(1)	/* OPT_GENEVE_TYPE */
900 				   + nla_total_size(opt->length * 4);
901 							/* OPT_GENEVE_DATA */
902 			offset += sizeof(*opt) + opt->length * 4;
903 		}
904 	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
905 		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_VXLAN */
906 			   + nla_total_size(4);	/* OPT_VXLAN_GBP */
907 	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
908 		struct erspan_metadata *md = ip_tunnel_info_opts(info);
909 
910 		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_ERSPAN */
911 			   + nla_total_size(1)	/* OPT_ERSPAN_VER */
912 			   + (md->version == 1 ? nla_total_size(4)
913 						/* OPT_ERSPAN_INDEX (v1) */
914 					       : nla_total_size(1) +
915 						 nla_total_size(1));
916 						/* OPT_ERSPAN_DIR + HWID (v2) */
917 	}
918 
919 	return opt_len;
920 }
921 
922 static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
923 {
924 	return nla_total_size_64bit(8)	/* LWTUNNEL_IP_ID */
925 		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
926 		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
927 		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
928 		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
929 		+ nla_total_size(2)	/* LWTUNNEL_IP_FLAGS */
930 		+ ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
931 					/* LWTUNNEL_IP_OPTS */
932 }
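
The nla_total_size() terms above account for netlink framing: each attribute has a 4-byte header and is padded to a 4-byte boundary, and the 64-bit ID may additionally reserve a pad attribute (that is the nla_total_size_64bit() variant). A userspace re-derivation of the fixed part of this size budget, under those assumptions:

#include <stdio.h>

#define NLA_HDRLEN	4
#define NLA_ALIGN(x)	(((x) + 3) & ~3)

static int nla_total(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* ID (8 bytes, plus a possible pad attribute), DST/SRC (4 each),
	 * TOS/TTL (1 each), FLAGS (2); the options nest is not counted.
	 */
	int fixed = nla_total(8) + nla_total(0) +
		    nla_total(4) + nla_total(4) +
		    nla_total(1) + nla_total(1) + nla_total(2);

	printf("fixed LWTUNNEL_IP_* size = %d bytes\n", fixed);
	return 0;
}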
933 
934 static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
935 {
936 	struct ip_tunnel_info *info_a = lwt_tun_info(a);
937 	struct ip_tunnel_info *info_b = lwt_tun_info(b);
938 
939 	return memcmp(info_a, info_b, sizeof(info_a->key)) ||
940 	       info_a->mode != info_b->mode ||
941 	       info_a->options_len != info_b->options_len ||
942 	       memcmp(ip_tunnel_info_opts(info_a),
943 		      ip_tunnel_info_opts(info_b), info_a->options_len);
944 }
945 
946 static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
947 	.build_state = ip_tun_build_state,
948 	.destroy_state = ip_tun_destroy_state,
949 	.fill_encap = ip_tun_fill_encap_info,
950 	.get_encap_size = ip_tun_encap_nlsize,
951 	.cmp_encap = ip_tun_cmp_encap,
952 	.owner = THIS_MODULE,
953 };
954 
955 static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
956 	[LWTUNNEL_IP6_UNSPEC]	= { .strict_start_type = LWTUNNEL_IP6_OPTS },
957 	[LWTUNNEL_IP6_ID]		= { .type = NLA_U64 },
958 	[LWTUNNEL_IP6_DST]		= { .len = sizeof(struct in6_addr) },
959 	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
960 	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
961 	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
962 	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
963 	[LWTUNNEL_IP6_OPTS]		= { .type = NLA_NESTED },
964 };
965 
966 static int ip6_tun_build_state(struct net *net, struct nlattr *attr,
967 			       unsigned int family, const void *cfg,
968 			       struct lwtunnel_state **ts,
969 			       struct netlink_ext_ack *extack)
970 {
971 	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
972 	struct lwtunnel_state *new_state;
973 	struct ip_tunnel_info *tun_info;
974 	int err, opt_len;
975 
976 	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
977 					  ip6_tun_policy, extack);
978 	if (err < 0)
979 		return err;
980 
981 	opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack);
982 	if (opt_len < 0)
983 		return opt_len;
984 
985 	new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
986 	if (!new_state)
987 		return -ENOMEM;
988 
989 	new_state->type = LWTUNNEL_ENCAP_IP6;
990 
991 	tun_info = lwt_tun_info(new_state);
992 
993 	err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack);
994 	if (err < 0) {
995 		lwtstate_free(new_state);
996 		return err;
997 	}
998 
999 	if (tb[LWTUNNEL_IP6_ID])
1000 		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);
1001 
1002 	if (tb[LWTUNNEL_IP6_DST])
1003 		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
1004 
1005 	if (tb[LWTUNNEL_IP6_SRC])
1006 		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
1007 
1008 	if (tb[LWTUNNEL_IP6_HOPLIMIT])
1009 		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
1010 
1011 	if (tb[LWTUNNEL_IP6_TC])
1012 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
1013 
1014 	if (tb[LWTUNNEL_IP6_FLAGS])
1015 		tun_info->key.tun_flags |=
1016 				(nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
1017 				 ~TUNNEL_OPTIONS_PRESENT);
1018 
1019 	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
1020 	tun_info->options_len = opt_len;
1021 
1022 	*ts = new_state;
1023 
1024 	return 0;
1025 }
1026 
1027 static int ip6_tun_fill_encap_info(struct sk_buff *skb,
1028 				   struct lwtunnel_state *lwtstate)
1029 {
1030 	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
1031 
1032 	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
1033 			 LWTUNNEL_IP6_PAD) ||
1034 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
1035 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
1036 	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
1037 	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
1038 	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
1039 	    ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
1040 		return -ENOMEM;
1041 
1042 	return 0;
1043 }
1044 
1045 static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
1046 {
1047 	return nla_total_size_64bit(8)	/* LWTUNNEL_IP6_ID */
1048 		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
1049 		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
1050 		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
1051 		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
1052 		+ nla_total_size(2)	/* LWTUNNEL_IP6_FLAGS */
1053 		+ ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
1054 					/* LWTUNNEL_IP6_OPTS */
1055 }
1056 
1057 static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
1058 	.build_state = ip6_tun_build_state,
1059 	.fill_encap = ip6_tun_fill_encap_info,
1060 	.get_encap_size = ip6_tun_encap_nlsize,
1061 	.cmp_encap = ip_tun_cmp_encap,
1062 	.owner = THIS_MODULE,
1063 };
1064 
1065 void __init ip_tunnel_core_init(void)
1066 {
1067 	/* If you land here, check whether increasing ip_tunnel_info's
1068 	 * options_len is a reasonable choice given its usage in front
1069 	 * ends (it is part of flow keys, among other things).
1070 	 */
1071 	BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);
1072 
1073 	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
1074 	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
1075 }
1076 
1077 DEFINE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
1078 EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
1079 
1080 void ip_tunnel_need_metadata(void)
1081 {
1082 	static_branch_inc(&ip_tunnel_metadata_cnt);
1083 }
1084 EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
1085 
1086 void ip_tunnel_unneed_metadata(void)
1087 {
1088 	static_branch_dec(&ip_tunnel_metadata_cnt);
1089 }
1090 EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
1091 
1092 /* Returns either the correct skb->protocol value, or 0 if invalid. */
1093 __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
1094 {
1095 	if (skb_network_header(skb) >= skb->head &&
1096 	    (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
1097 	    ip_hdr(skb)->version == 4)
1098 		return htons(ETH_P_IP);
1099 	if (skb_network_header(skb) >= skb->head &&
1100 	    (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
1101 	    ipv6_hdr(skb)->version == 6)
1102 		return htons(ETH_P_IPV6);
1103 	return 0;
1104 }
1105 EXPORT_SYMBOL(ip_tunnel_parse_protocol);
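
The parser above works because the version nibble occupies the first four bits of both IPv4 and IPv6 headers, which lets tunnels that carry bare IP (via the header_ops below) recover skb->protocol without any link-layer hint. The same sniff in userspace:

#include <stdio.h>
#include <stdint.h>

static uint16_t sniff_protocol(const uint8_t *pkt, size_t len)
{
	if (len >= 20 && (pkt[0] >> 4) == 4)
		return 0x0800;			/* ETH_P_IP */
	if (len >= 40 && (pkt[0] >> 4) == 6)
		return 0x86dd;			/* ETH_P_IPV6 */
	return 0;
}

int main(void)
{
	uint8_t v4[20] = { 0x45 };		/* version 4, IHL 5 */
	uint8_t v6[40] = { 0x60 };		/* version 6 */

	printf("v4 -> 0x%04x\n", sniff_protocol(v4, sizeof(v4)));
	printf("v6 -> 0x%04x\n", sniff_protocol(v6, sizeof(v6)));
	return 0;
}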
1106 
1107 const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
1108 EXPORT_SYMBOL(ip_tunnel_header_ops);
1109