xref: /openbmc/linux/net/openvswitch/vport-gre.c (revision 5bd8e16d)
1 /*
2  * Copyright (c) 2007-2013 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/if.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/if_vlan.h>
26 #include <linux/in.h>
27 #include <linux/if_vlan.h>
28 #include <linux/in.h>
29 #include <linux/in_route.h>
30 #include <linux/inetdevice.h>
31 #include <linux/jhash.h>
32 #include <linux/list.h>
33 #include <linux/kernel.h>
34 #include <linux/workqueue.h>
35 #include <linux/rculist.h>
36 #include <net/route.h>
37 #include <net/xfrm.h>
38 
39 #include <net/icmp.h>
40 #include <net/ip.h>
41 #include <net/ip_tunnels.h>
42 #include <net/gre.h>
43 #include <net/net_namespace.h>
44 #include <net/netns/generic.h>
45 #include <net/protocol.h>
46 
47 #include "datapath.h"
48 #include "vport.h"
49 
/* Returns the least-significant 32 bits of a __be64.
 *
 * A __be64 keeps network (big-endian) byte order in memory, so its four
 * least-significant bytes are the last four bytes stored.  On a big-endian
 * host a plain truncating cast keeps exactly those bytes; on a little-endian
 * host those same bytes land in the upper half of the native u64, hence the
 * shift.  Either way the result remains in network byte order (__be32).
 */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}
59 
60 static __be16 filter_tnl_flags(__be16 flags)
61 {
62 	return flags & (TUNNEL_CSUM | TUNNEL_KEY);
63 }
64 
/* Push a GRE header onto @skb as described by the flow's tunnel key.
 *
 * @skb: packet to encapsulate; OVS_CB(skb)->tun_key must be non-NULL.
 * @tunnel_hlen: GRE header length the caller computed from the tunnel flags.
 *
 * Returns the (possibly replaced) skb on success, or NULL on failure.
 * NOTE(review): on failure gre_handle_offloads() returns an ERR_PTR and
 * presumably has already consumed the original skb — callers must not
 * touch it after a NULL return; confirm against net/ipv4/gre_offload.
 */
static struct sk_buff *__build_header(struct sk_buff *skb,
				      int tunnel_hlen)
{
	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
	struct tnl_ptk_info tpi;

	/* Set up GRE checksum offload handling before building the header. */
	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		return NULL;

	/* Only the CSUM/KEY option bits are propagated into the header. */
	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
	tpi.proto = htons(ETH_P_TEB);	/* payload is a full Ethernet frame */
	/* GRE carries only a 32-bit key: use the low half of the 64-bit id. */
	tpi.key = be64_get_low32(tun_key->tun_id);
	tpi.seq = 0;
	gre_build_header(skb, &tpi, tunnel_hlen);

	return skb;
}
83 
84 static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
85 {
86 #ifdef __BIG_ENDIAN
87 	return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
88 #else
89 	return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
90 #endif
91 }
92 
93 /* Called with rcu_read_lock and BH disabled. */
94 static int gre_rcv(struct sk_buff *skb,
95 		   const struct tnl_ptk_info *tpi)
96 {
97 	struct ovs_key_ipv4_tunnel tun_key;
98 	struct ovs_net *ovs_net;
99 	struct vport *vport;
100 	__be64 key;
101 
102 	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
103 	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
104 	if (unlikely(!vport))
105 		return PACKET_REJECT;
106 
107 	key = key_to_tunnel_id(tpi->key, tpi->seq);
108 	ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
109 			      filter_tnl_flags(tpi->flags));
110 
111 	ovs_vport_receive(vport, skb, &tun_key);
112 	return PACKET_RCVD;
113 }
114 
/* Transmit @skb out the GRE tunnel vport.
 *
 * Encapsulates the packet according to the flow's per-packet tunnel key
 * (OVS_CB(skb)->tun_key) and hands it to iptunnel_xmit().  Returns the
 * value of iptunnel_xmit() on success or a negative errno on failure.
 */
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df;
	int err;

	/* Encapsulation is impossible without a tunnel key on the flow. */
	if (unlikely(!OVS_CB(skb)->tun_key)) {
		err = -EINVAL;
		goto error;
	}

	/* Route lookup, keyed on the outer (tunnel endpoint) addresses. */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
	fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* GRE header length depends on which option flags (csum/key) are set. */
	tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);

	/* Ensure headroom for link-layer + outer IP + GRE headers, plus a
	 * VLAN tag if one is pending in the skb metadata.  Also re-allocate
	 * if the header area is shared, since we are about to write into it.
	 */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* An offloaded VLAN tag must be pushed into the payload now, or it
	 * would otherwise end up outside the tunnel encapsulation.
	 */
	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb)))) {
			err = -ENOMEM;
			goto err_free_rt;
		}
		skb->vlan_tci = 0;
	}

	/* Push Tunnel header. */
	skb = __build_header(skb, tunnel_hlen);
	if (unlikely(!skb)) {
		/* __build_header() returns NULL after gre_handle_offloads()
		 * failed, which presumably already consumed the skb.
		 * NOTE(review): err = 0 silently reports the drop as "0 bytes
		 * sent" rather than an error — looks deliberate (best-effort),
		 * but confirm before changing.
		 */
		err = 0;
		goto err_free_rt;
	}

	/* Outer-header DF is driven by the tunnel key, not the inner packet. */
	df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
		htons(IP_DF) : 0;

	skb->local_df = 1;

	return iptunnel_xmit(rt, skb, fl.saddr,
			     OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
			     OVS_CB(skb)->tun_key->ipv4_tos,
			     OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
err_free_rt:
	ip_rt_put(rt);
error:
	return err;
}
188 
/* Registration record for the kernel's shared GRE demultiplexer; gre_rcv()
 * receives every decapsulated GRE packet once this is registered.
 * NOTE(review): priority presumably orders this handler relative to other
 * GRE consumers (e.g. ip_gre) — confirm against net/ipv4/gre_demux.c.
 */
static struct gre_cisco_protocol gre_protocol = {
	.handler        = gre_rcv,
	.priority       = 1,
};
193 
194 static int gre_ports;
195 static int gre_init(void)
196 {
197 	int err;
198 
199 	gre_ports++;
200 	if (gre_ports > 1)
201 		return 0;
202 
203 	err = gre_cisco_register(&gre_protocol);
204 	if (err)
205 		pr_warn("cannot register gre protocol handler\n");
206 
207 	return err;
208 }
209 
210 static void gre_exit(void)
211 {
212 	gre_ports--;
213 	if (gre_ports > 0)
214 		return;
215 
216 	gre_cisco_unregister(&gre_protocol);
217 }
218 
/* The vport's private area holds its name; hand back a pointer to it. */
static const char *gre_get_name(const struct vport *vport)
{
	const char *name = vport_priv(vport);

	return name;
}
223 
224 static struct vport *gre_create(const struct vport_parms *parms)
225 {
226 	struct net *net = ovs_dp_get_net(parms->dp);
227 	struct ovs_net *ovs_net;
228 	struct vport *vport;
229 	int err;
230 
231 	err = gre_init();
232 	if (err)
233 		return ERR_PTR(err);
234 
235 	ovs_net = net_generic(net, ovs_net_id);
236 	if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
237 		vport = ERR_PTR(-EEXIST);
238 		goto error;
239 	}
240 
241 	vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
242 	if (IS_ERR(vport))
243 		goto error;
244 
245 	strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
246 	rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
247 	return vport;
248 
249 error:
250 	gre_exit();
251 	return vport;
252 }
253 
254 static void gre_tnl_destroy(struct vport *vport)
255 {
256 	struct net *net = ovs_dp_get_net(vport->dp);
257 	struct ovs_net *ovs_net;
258 
259 	ovs_net = net_generic(net, ovs_net_id);
260 
261 	rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
262 	ovs_vport_deferred_free(vport);
263 	gre_exit();
264 }
265 
/* Vport operations for OVS_VPORT_TYPE_GRE tunnel ports. */
const struct vport_ops ovs_gre_vport_ops = {
	.type		= OVS_VPORT_TYPE_GRE,
	.create		= gre_create,		/* also refs the GRE handler */
	.destroy	= gre_tnl_destroy,	/* deferred free + handler unref */
	.get_name	= gre_get_name,
	.send		= gre_tnl_send,
};
273