/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#ifdef CONFIG_OPENVSWITCH_GRE
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/gre.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/protocol.h>

#include "datapath.h"
#include "vport.h"

/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)x;
#else
        return (__force __be32)((__force u64)x >> 32);
#endif
}

/* Keep only the tunnel flags this vport supports: checksum and key. */
static __be16 filter_tnl_flags(__be16 flags)
{
        return flags & (TUNNEL_CSUM | TUNNEL_KEY);
}

/* Push a GRE header built from the flow's tunnel key onto the skb. */
static struct sk_buff *__build_header(struct sk_buff *skb,
                                      int tunnel_hlen)
{
        const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
        struct tnl_ptk_info tpi;

        skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
                return NULL;

        tpi.flags = filter_tnl_flags(tun_key->tun_flags);
        tpi.proto = htons(ETH_P_TEB);
        tpi.key = be64_get_low32(tun_key->tun_id);
        tpi.seq = 0;
        gre_build_header(skb, &tpi, tunnel_hlen);

        return skb;
}

/* Combine the GRE key (low 32 bits) and sequence number (high 32 bits)
 * into a 64-bit tunnel ID.
 */
static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
#ifdef __BIG_ENDIAN
        return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
#else
        return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
#endif
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
                   const struct tnl_ptk_info *tpi)
{
        struct ovs_key_ipv4_tunnel tun_key;
        struct ovs_net *ovs_net;
        struct vport *vport;
        __be64 key;

        ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
        vport = rcu_dereference(ovs_net->vport_net.gre_vport);
        if (unlikely(!vport))
                return PACKET_REJECT;

        key = key_to_tunnel_id(tpi->key, tpi->seq);
        ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
                              filter_tnl_flags(tpi->flags));

        ovs_vport_receive(vport, skb, &tun_key);
        return PACKET_RCVD;
}

static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct net *net = ovs_dp_get_net(vport->dp);
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
        int tunnel_hlen;
        __be16 df;
        int err;

        if (unlikely(!OVS_CB(skb)->tun_key)) {
                err = -EINVAL;
                goto error;
        }

        /* Route lookup */
        memset(&fl, 0, sizeof(fl));
        fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
        fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
        fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
        fl.flowi4_mark = skb->mark;
        fl.flowi4_proto = IPPROTO_GRE;

        rt = ip_route_output_key(net, &fl);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);

        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr)
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        if (vlan_tx_tag_present(skb)) {
                if (unlikely(!__vlan_put_tag(skb,
                                             skb->vlan_proto,
                                             vlan_tx_tag_get(skb)))) {
                        err = -ENOMEM;
                        goto err_free_rt;
                }
                skb->vlan_tci = 0;
        }

        /* Push Tunnel header. */
        skb = __build_header(skb, tunnel_hlen);
        if (unlikely(!skb)) {
                err = 0;
                goto err_free_rt;
        }

        df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
                htons(IP_DF) : 0;

        skb->local_df = 1;

        return iptunnel_xmit(net, rt, skb, fl.saddr,
                             OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
                             OVS_CB(skb)->tun_key->ipv4_tos,
                             OVS_CB(skb)->tun_key->ipv4_ttl, df);
err_free_rt:
        ip_rt_put(rt);
error:
        return err;
}

static struct gre_cisco_protocol gre_protocol = {
        .handler = gre_rcv,
        .priority = 1,
};

/* Number of GRE vports sharing the single registered protocol handler. */
static int gre_ports;
static int gre_init(void)
{
        int err;

        gre_ports++;
        if (gre_ports > 1)
                return 0;

        err = gre_cisco_register(&gre_protocol);
        if (err)
                pr_warn("cannot register gre protocol handler\n");

        return err;
}

static void gre_exit(void)
{
        gre_ports--;
        if (gre_ports > 0)
                return;

        gre_cisco_unregister(&gre_protocol);
}

static const char *gre_get_name(const struct vport *vport)
{
        return vport_priv(vport);
}

static struct vport *gre_create(const struct vport_parms *parms)
{
        struct net *net = ovs_dp_get_net(parms->dp);
        struct ovs_net *ovs_net;
        struct vport *vport;
        int err;

        err = gre_init();
        if (err)
                return ERR_PTR(err);

        ovs_net = net_generic(net, ovs_net_id);
        if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
                vport = ERR_PTR(-EEXIST);
                goto error;
        }

        vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
        if (IS_ERR(vport))
                goto error;

        strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
        rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
        return vport;

error:
        gre_exit();
        return vport;
}

static void gre_tnl_destroy(struct vport *vport)
{
        struct net *net = ovs_dp_get_net(vport->dp);
        struct ovs_net *ovs_net;

        ovs_net = net_generic(net, ovs_net_id);

        rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
        ovs_vport_deferred_free(vport);
        gre_exit();
}

const struct vport_ops ovs_gre_vport_ops = {
        .type = OVS_VPORT_TYPE_GRE,
        .create = gre_create,
        .destroy = gre_tnl_destroy,
        .get_name = gre_get_name,
        .send = gre_tnl_send,
};

#endif /* CONFIG_OPENVSWITCH_GRE */