/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>

const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(iptun_encaps);

const struct ip6_tnl_encap_ops __rcu *
		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(ip6tun_encaps);

/* Build the outer IPv4 header and transmit the encapsulated packet,
 * accounting the transmitted bytes in the tunnel device stats.
 */
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 proto,
		   __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct net_device *dev = skb->dev;
	int skb_iif = skb->skb_iif;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	if (skb_iif && proto == IPPROTO_UDP) {
		/* Arrived from an ingress interface and got UDP encapsulated.
		 * The encapsulated network segment length may exceed dst mtu.
		 * Allow IP Fragmentation of segments.
		 */
		IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
	}

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = dst;
	iph->saddr = src;
	iph->ttl = ttl;
	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out(net, sk, skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	iptunnel_xmit_stats(dev, pkt_len);
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);

/* Strip the outer tunnel header and set skb->protocol to the inner
 * protocol.  For ETH_P_TEB the inner Ethernet header is inspected;
 * per-packet state (hash, vlan, queue mapping) is reset and offload
 * state is adjusted for the decapsulated packet.
 */
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	skb_clear_hash_if_not_l4(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);

	return iptunnel_pull_offloads(skb);
}
EXPORT_SYMBOL_GPL(__iptunnel_pull_header);

/* Build a TX-mode metadata dst for replying to a packet received on a
 * metadata-based tunnel: the reply destination is the original source
 * address.
 */
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags)
{
	struct metadata_dst *res;
	struct ip_tunnel_info *dst, *src;

	if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
		return NULL;

	res = metadata_dst_alloc(0, flags);
	if (!res)
		return NULL;

	dst = &res->u.tun_info;
	src = &md->u.tun_info;
	dst->key.tun_id = src->key.tun_id;
	if (src->mode & IP_TUNNEL_INFO_IPV6)
		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
		       sizeof(struct in6_addr));
	else
		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
	dst->mode = src->mode | IP_TUNNEL_INFO_TX;

	return res;
}
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);

/* Prepare the checksum/GSO state of an skb before tunnel encapsulation
 * is added.
 */
int iptunnel_handle_offloads(struct sk_buff *skb,
			     int gso_type_mask)
{
	int err;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		err = skb_header_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type |= gso_type_mask;
		return 0;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
		/* We clear encapsulation here to prevent badly-written
		 * drivers potentially deciding to offload an inner checksum
		 * if we set CHECKSUM_PARTIAL on the outer header.
		 * This should go away when the drivers are all fixed.
		 */
		skb->encapsulation = 0;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);

/* Frequently updated stats are per-CPU; the rest are shared (netdev->stats). */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	netdev_stats_to_stats64(tot, &dev->stats);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);

static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
};

/* Parse LWTUNNEL_IP_* netlink attributes into an ip_tunnel_info based
 * lwtunnel state (TX mode).
 */
static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
			      unsigned int family, const void *cfg,
			      struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}

static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP_PAD) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}

static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP_ID */
		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
}

static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	return memcmp(lwt_tun_info(a), lwt_tun_info(b),
		      sizeof(struct ip_tunnel_info));
}

static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
	.build_state = ip_tun_build_state,
	.fill_encap = ip_tun_fill_encap_info,
	.get_encap_size = ip_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
};

static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
	[LWTUNNEL_IP6_ID]		= { .type = NLA_U64 },
	[LWTUNNEL_IP6_DST]		= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
};

static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP6;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP6_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);

	if (tb[LWTUNNEL_IP6_DST])
		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);

	if (tb[LWTUNNEL_IP6_SRC])
		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);

	if (tb[LWTUNNEL_IP6_HOPLIMIT])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);

	if (tb[LWTUNNEL_IP6_TC])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);

	if (tb[LWTUNNEL_IP6_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}

static int ip6_tun_fill_encap_info(struct sk_buff *skb,
				   struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP6_PAD) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}

static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP6_ID */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
		+ nla_total_size(2);	/* LWTUNNEL_IP6_FLAGS */
}

static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
	.build_state = ip6_tun_build_state,
	.fill_encap = ip6_tun_fill_encap_info,
	.get_encap_size = ip6_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
};
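
/*
 * For reference, the LWTUNNEL_IP(6)_* attributes handled above are what
 * iproute2 emits for routes carrying an "encap ip" / "encap ip6" nexthop,
 * typically combined with a metadata-based (external/collect_md) tunnel
 * device.  A minimal, illustrative example (device name and addresses are
 * made up):
 *
 *	ip link add vxlan0 type vxlan dstport 4789 external
 *	ip route add 192.168.2.0/24 encap ip id 20 dst 10.1.1.1 dev vxlan0
 */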

/* Register the IPv4 and IPv6 lightweight tunnel encap ops at boot. */
void __init ip_tunnel_core_init(void)
{
	/* If you land here, check whether increasing ip_tunnel_info's
	 * options_len is a reasonable choice given its usage in front ends
	 * (e.g., it's part of flow keys, etc).
	 */
	BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);

	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
}

/* Static key counting users of metadata-based tunnels; see
 * ip_tunnel_need_metadata()/ip_tunnel_unneed_metadata().
 */
struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
	static_key_slow_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
	static_key_slow_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
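
/*
 * Illustrative sketch (not part of the original file): how a tunnel
 * driver's transmit path typically chains the helpers above.  The function
 * name, its parameters and the chosen GSO type are hypothetical, and error
 * handling is kept minimal; real drivers compute xnet and the route
 * themselves.
 *
 *	static void example_tunnel_xmit(struct sock *sk, struct rtable *rt,
 *					struct sk_buff *skb,
 *					__be32 src, __be32 dst,
 *					__u8 tos, __u8 ttl)
 *	{
 *		// Mark inner headers and set up GSO/checksum state first.
 *		if (iptunnel_handle_offloads(skb, SKB_GSO_UDP_TUNNEL)) {
 *			kfree_skb(skb);
 *			return;
 *		}
 *
 *		// Push the outer IPv4 header and hand off to the IP layer.
 *		iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
 *			      tos, ttl, htons(IP_DF), false);
 *	}
 */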