xref: /openbmc/linux/include/net/ip_tunnels.h (revision 1dd24dae)
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node hash_node;
	struct net_device	*dev;

	int		err_count;	/* Number of arrived ICMP errors */
	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */

	/* These four fields are used only by GRE */
	__u32		i_seqno;	/* The last seen seqno	*/
	__u32		o_seqno;	/* The last output seqno */
	int		hlen;		/* Precalculated header length */
	int		mlink;

	struct ip_tunnel_parm parms;

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	int			ip_tnl_net_id;
	struct gro_cells	gro_cells;
};
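
/* Note: tunnel drivers built on this framework keep their struct ip_tunnel
 * in the net_device private area, so driver callbacks typically recover it
 * with (illustrative):
 *
 *	struct ip_tunnel *tunnel = netdev_priv(dev);
 */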

#define TUNNEL_CSUM	__cpu_to_be16(0x01)
#define TUNNEL_ROUTING	__cpu_to_be16(0x02)
#define TUNNEL_KEY	__cpu_to_be16(0x04)
#define TUNNEL_SEQ	__cpu_to_be16(0x08)
#define TUNNEL_STRICT	__cpu_to_be16(0x10)
#define TUNNEL_REC	__cpu_to_be16(0x20)
#define TUNNEL_VERSION	__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY	__cpu_to_be16(0x80)

struct tnl_ptk_info {
	__be16 flags;
	__be16 proto;
	__be32 key;
	__be32 seq;
};
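
/* Illustrative sketch only: a GRE-style receive path fills a tnl_ptk_info
 * from the parsed outer header before passing the packet to the lookup
 * code.  "greh" (the parsed GRE header) and "options" (the optional key
 * word following it) are hypothetical locals here.
 *
 *	struct tnl_ptk_info tpi;
 *
 *	tpi.flags = 0;
 *	tpi.proto = greh->protocol;
 *	if (greh->flags & GRE_CSUM)
 *		tpi.flags |= TUNNEL_CSUM;
 *	if (greh->flags & GRE_SEQ)
 *		tpi.flags |= TUNNEL_SEQ;
 *	if (greh->flags & GRE_KEY) {
 *		tpi.flags |= TUNNEL_KEY;
 *		tpi.key = *options;
 *	} else {
 *		tpi.key = 0;
 *	}
 */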

#define PACKET_RCVD	0
#define PACKET_REJECT	1

#define IP_TNL_HASH_BITS   10
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct hlist_head *tunnels;
	struct net_device *fb_tunnel_dev;
};

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
				  struct rtnl_link_ops *ops, char *devname);

void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
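
/* A driver's ndo_start_xmit usually just supplies its configured outer
 * header template and defers to ip_tunnel_xmit().  Minimal sketch, with
 * "ipfoo" standing in for a hypothetical driver prefix:
 *
 *	static netdev_tx_t ipfoo_tunnel_xmit(struct sk_buff *skb,
 *					     struct net_device *dev)
 *	{
 *		struct ip_tunnel *tunnel = netdev_priv(dev);
 *
 *		ip_tunnel_xmit(skb, dev, &tunnel->parms.iph);
 *		return NETDEV_TX_OK;
 *	}
 */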

struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key);

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error);
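
/* Illustrative receive-side sketch: resolve the tunnel from the per-net
 * table and hand the packet over.  "itn" would come from the caller's
 * per-net state (e.g. via net_generic()); error handling is omitted.
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	struct ip_tunnel *t;
 *
 *	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 *			     iph->saddr, iph->daddr, tpi->key);
 *	if (t) {
 *		ip_tunnel_rcv(t, skb, tpi, log_ecn_error);
 *		return PACKET_RCVD;
 *	}
 *	return PACKET_REJECT;
 */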
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
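
/* Sketch of how an rtnl_link_ops based tunnel module might wire these
 * helpers up; the ipfoo_* identifiers and ipfoo_net_id are hypothetical:
 *
 *	static void ipfoo_tunnel_setup(struct net_device *dev)
 *	{
 *		dev->netdev_ops = &ipfoo_netdev_ops;
 *		ip_tunnel_setup(dev, ipfoo_net_id);
 *	}
 *
 *	static int ipfoo_newlink(struct net *src_net, struct net_device *dev,
 *				 struct nlattr *tb[], struct nlattr *data[])
 *	{
 *		struct ip_tunnel_parm p;
 *
 *		ipfoo_netlink_parms(data, tb, &p);
 *		return ip_tunnel_newlink(dev, tb, &p);
 *	}
 *
 * with .dellink typically pointing at ip_tunnel_dellink.
 */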

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
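
/* On transmit, the outer dsfield is normally derived from the configured
 * TOS (or inherited from the inner packet) and then combined with the
 * inner ECN bits when the outer header is filled in, e.g. (sketch):
 *
 *	iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
 *
 * where "tos" is the outer dsfield chosen so far and "inner_iph" points
 * at the encapsulated packet's IP header.
 */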

static inline void tunnel_ip_select_ident(struct sk_buff *skb,
					  const struct iphdr *old_iph,
					  struct dst_entry *dst)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Use inner packet iph-id if possible. */
	if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
		iph->id	= old_iph->id;
	else
		__ip_select_ident(iph, dst,
				  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
}

static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	int pkt_len = skb->len - skb_transport_offset(skb);
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	nf_reset(skb);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
}
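
/* Callers are expected to have built the outer IPv4 header before calling
 * iptunnel_xmit(); note that pkt_len above is counted from the transport
 * header onward.  A rough sketch of the surrounding transmit code, where
 * protocol, tos, dst, src, ttl and inner_iph are placeholders for values
 * the caller has already computed:
 *
 *	skb_push(skb, sizeof(struct iphdr));
 *	skb_reset_network_header(skb);
 *	iph = ip_hdr(skb);
 *	iph->version	= 4;
 *	iph->ihl	= sizeof(struct iphdr) >> 2;
 *	iph->protocol	= protocol;
 *	iph->tos	= ip_tunnel_ecn_encap(tos, inner_iph, skb);
 *	iph->daddr	= dst;
 *	iph->saddr	= src;
 *	iph->ttl	= ttl;
 *	tunnel_ip_select_ident(skb, inner_iph, skb_dst(skb));
 *	iptunnel_xmit(skb, dev);
 */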
#endif /* __NET_IP_TUNNELS_H */