/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(sizeof_field(struct ip_tunnel_key, u) -		\
	 sizeof_field(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	__be16			tun_flags;
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be32			label;		/* Flow Label for IPv6 */
	u32			nhid;
	__be16			tp_src;
	__be16			tp_dst;
	__u8			flow_flags;
};

struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
#define IP_TUNNEL_INFO_BRIDGE	0x04	/* represents a bridged tunnel id */

/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX					\
	GENMASK((sizeof_field(struct ip_tunnel_info,		\
			      options_len) * BITS_PER_BYTE) - 1, 0)

#define ip_tunnel_info_opts(info)				\
	_Generic(info,						\
		 const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
		 struct ip_tunnel_info * : ((void *)((info) + 1))\
	)

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	struct ip_tunnel_encap	encap;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;
#endif
	u8			options_len;
	u8			mode;
};
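
/* The variable-length tunnel options (e.g. Geneve TLVs or ERSPAN
 * metadata) live immediately after struct ip_tunnel_info in memory,
 * which is why ip_tunnel_info_opts() above is simply "(info) + 1".
 * A hedged sketch of walking the raw option bytes of a received
 * metadata dst ("md_dst" is an illustrative local):
 *
 *	struct ip_tunnel_info *info = &md_dst->u.tun_info;
 *	const u8 *opts = ip_tunnel_info_opts(info);
 *	u8 len = info->options_len;
 *
 *	while (len--)
 *		pr_debug("option byte %02x\n", *opts++);
 */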

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};

struct metadata_dst;

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node hash_node;

	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct net		*net;	/* netns for packet i/o */

	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */
	int		err_count;	/* Number of arrived ICMP errors */

	/* These three fields used only by GRE */
	u32		i_seqno;	/* The last seen seqno	*/
	atomic_t	o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	/* These four fields used only by ERSPAN */
	u32		index;		/* ERSPAN type II index */
	u8		erspan_ver;	/* ERSPAN version */
	u8		dir;		/* ERSPAN direction */
	u16		hwid;		/* ERSPAN hardware ID */

	struct dst_cache dst_cache;

	struct ip_tunnel_parm parms;

	int		mlink;
	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
	struct ip_tunnel_encap encap;

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	unsigned int		ip_tnl_net_id;
	struct gro_cells	gro_cells;
	__u32			fwmark;
	bool			collect_md;
	bool			ignore_df;
};

struct tnl_ptk_info {
	__be16 flags;
	__be16 proto;
	__be32 key;
	__be32 seq;
	int hdr_len;
};

#define PACKET_RCVD	0
#define PACKET_REJECT	1
#define PACKET_NEXT	2

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct rtnl_link_ops *rtnl_link_ops;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
	int type;
};

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	key->tun_flags = tun_flags;

	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
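
/* Hedged usage sketch (illustrative, not from this file): a driver
 * building a TX key from an outer IPv4 header template "iph" and a
 * GRE key "gre_key", with no flow label and no transport ports:
 *
 *	struct ip_tunnel_key key;
 *
 *	ip_tunnel_key_init(&key, iph->saddr, iph->daddr,
 *			   iph->tos, iph->ttl, 0, 0, 0,
 *			   key32_to_tunnel_id(gre_key), TUNNEL_KEY);
 *
 * The helper also clears the IPv6 address padding and any trailing
 * struct padding, so the key can safely be memcmp()'d or hashed.
 */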

static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;
	if (!info)
		return true;
	if (info->key.tun_flags & TUNNEL_NOCACHE)
		return false;

	return true;
}

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

/* Inverse of tunnel_id_to_key32() below: builds a __be64 tunnel id
 * holding the __be32 key in its least-significant 32 bits.
 */
static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
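
/* Worked example: together these two helpers keep the 32-bit key in
 * the last four bytes of the 8-byte tunnel id as seen on the wire.
 * With key = htonl(42), key32_to_tunnel_id() yields a __be64 whose
 * byte representation is 00 00 00 00 00 00 00 2a on both big- and
 * little-endian hosts, and tunnel_id_to_key32() recovers htonl(42)
 * from it, so the conversion round-trips losslessly.
 */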

#ifdef CONFIG_INET

static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
				       int proto,
				       __be32 daddr, __be32 saddr,
				       __be32 key, __u8 tos,
				       struct net *net, int oif,
				       __u32 mark, __u32 tun_inner_hash,
				       __u8 flow_flags)
{
	memset(fl4, 0, sizeof(*fl4));

	if (oif) {
		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
		/* Legacy VRF/l3mdev use case */
		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
	}

	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
	fl4->flowi4_mark = mark;
	fl4->flowi4_multipath_hash = tun_inner_hash;
	fl4->flowi4_flags = flow_flags;
}
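
/* Hedged sketch of a typical call site ("tunnel", "dst", "src" and
 * "tunnel_key" are illustrative locals): build the flow, then route:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, dst, src, tunnel_key,
 *			    tos, tunnel->net, tunnel->parms.link,
 *			    tunnel->fwmark, skb_get_hash(skb), 0);
 *	rt = ip_route_output_key(tunnel->net, &fl4);
 *	if (IS_ERR(rt))
 *		goto tx_error;
 */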

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
			   struct rtnl_link_ops *ops);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key);

void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
				   struct ip_tunnel_encap *encap);

void ip_tunnel_netlink_parms(struct nlattr *data[],
			     struct ip_tunnel_parm *parms);

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);

struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
	int (*err_handler)(struct sk_buff *skb, u32 info);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);
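
/* Hedged sketch (modeled loosely on FOU, purely illustrative) of how
 * an encapsulation provider plugs into the iptun_encaps[] table; the
 * my_* callbacks are hypothetical:
 *
 *	static const struct ip_tunnel_encap_ops my_encap_ops = {
 *		.encap_hlen	= my_encap_hlen,
 *		.build_header	= my_build_header,
 *		.err_handler	= my_err_handler,
 *	};
 *
 *	err = ip_tunnel_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip_tunnel_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *
 * The slot index is one of the TUNNEL_ENCAP_* types and must stay
 * below MAX_IPTUN_ENCAP_OPS.
 */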

static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
	int nhlen;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	default:
		nhlen = 0;
	}

	return pskb_network_may_pull(skb, nhlen);
}

/* Variant of pskb_inet_may_pull() that also accepts VLAN-tagged
 * frames and, on success, sets the network header past the MAC header.
 */
static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
{
	int nhlen = 0, maclen = ETH_HLEN;
	__be16 type = skb->protocol;

	/* Essentially this is skb_protocol(skb, true), with the MAC
	 * length computed as a side effect.
	 */
	if (eth_type_vlan(type))
		type = __vlan_get_protocol(skb, type, &maclen);

	switch (type) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	}
	/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
	 * a base network header in skb->head.
	 */
	if (!pskb_may_pull(skb, maclen + nhlen))
		return false;

	skb_set_network_header(skb, maclen);
	return true;
}

static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}
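
/* Hedged sketch: drivers typically derive the extra header room from
 * this helper when (re)configuring encapsulation, roughly as
 * ip_tunnel_encap_setup() does ("t" is an illustrative tunnel):
 *
 *	int hlen = ip_encap_hlen(&t->encap);
 *
 *	if (hlen < 0)
 *		return hlen;
 *	t->encap_hlen = hlen;
 *	t->hlen = t->tun_hlen + t->encap_hlen;
 */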

static inline int ip_tunnel_encap(struct sk_buff *skb,
				  struct ip_tunnel_encap *e,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, e, protocol, fl4);
	rcu_read_unlock();

	return ret;
}

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Extract ttl/hop limit from inner protocol */
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->ttl;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ((const struct ipv6hdr *)iph)->hop_limit;
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
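
/* Hedged TX-path sketch: combine the configured outer TOS with the
 * inner packet's ECN bits before building the outer header, so ECN
 * survives encapsulation (RFC 6040 style):
 *
 *	u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 */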

int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet);

static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
				       __be16 inner_proto, bool xnet)
{
	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}

void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, u8 proto,
		   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
			  int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		u64_stats_add(&tstats->tx_bytes, pkt_len);
		u64_stats_inc(&tstats->tx_packets);
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
		return;
	}

	if (pkt_len < 0) {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	} else {
		DEV_STATS_INC(dev, tx_dropped);
	}
}
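
/* Hedged sketch of the canonical caller, mirroring what the core
 * iptunnel_xmit() does: pass the payload length on success, 0 to
 * count a drop, or a negative errno to count a tx error:
 *
 *	pkt_len = skb->len - skb_inner_network_offset(skb);
 *	err = ip_local_out(net, sk, skb);
 *	if (unlikely(net_xmit_eval(err)))
 *		pkt_len = 0;
 *	iptunnel_xmit_stats(dev, pkt_len);
 */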

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   __be16 flags)
{
	info->options_len = len;
	if (len > 0) {
		memcpy(ip_tunnel_info_opts(info), from, len);
		info->key.tun_flags |= flags;
	}
}
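
/* Hedged sketch: attaching Geneve-style options to a TX tunnel info
 * ("opts" and "opts_len" are illustrative):
 *
 *	ip_tunnel_info_opts_set(&info, opts, opts_len, TUNNEL_GENEVE_OPT);
 *
 * The flag records which option format the raw bytes use; consumers
 * check key.tun_flags before interpreting ip_tunnel_info_opts().
 */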

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}
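
/* Hedged RX-path sketch: a device not itself configured for
 * collect_md still builds a metadata dst when something (e.g. a BPF
 * program) bumped the global metadata count:
 *
 *	if (tunnel->collect_md || ip_tunnel_collect_metadata()) {
 *		tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 *		if (!tun_dst)
 *			return PACKET_REJECT;
 *	}
 */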

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   __be16 flags)
{
	info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */