/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))

/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	struct list_head	sock_list;
};

static int geneve_net_id;

/* Pseudo network device */
struct geneve_dev {
	struct hlist_node  hlist;	/* vni hash table */
	struct net	   *net;	/* netns for packet i/o */
	struct net_device  *dev;	/* netdev for geneve tunnel */
	struct geneve_sock *sock;	/* socket used for geneve tunnel */
	u8		   vni[3];	/* virtual network ID for tunnel */
	u8		   ttl;		/* TTL override */
	u8		   tos;		/* TOS override */
	struct sockaddr_in remote;	/* IPv4 address for link partner */
	struct list_head   next;	/* geneve's per namespace list */
	__be16		   dst_port;
	bool		   collect_md;
};

struct geneve_sock {
	bool			collect_md;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct udp_offload	udp_offloads;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
};

static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}

static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}

static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    addr == geneve->remote.sin_addr.s_addr)
			return geneve;
	}
	return NULL;
}

static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}
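/* Worked example of the VNI <-> tunnel id mapping used above (purely
 * illustrative): a 24-bit VNI of 0x123456 becomes a 64-bit tunnel id
 * whose be64_to_cpu() value is 0x123456, i.e. the VNI sits in the last
 * three bytes of the network-byte-order word.  On little-endian hosts
 * vni_to_tunnel_id() builds that byte layout by shifting vni[0]/[1]/[2]
 * into bits 40/48/56 of the raw word; tunnel_id_to_vni() further below
 * reverses the same shifts.
 */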
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct geneve_dev *geneve = NULL;
	struct pcpu_sw_netstats *stats;
	struct iphdr *iph;
	u8 *vni;
	__be32 addr;
	int err;

	if (gs->collect_md) {
		static u8 zero_vni[3];

		vni = zero_vni;
		addr = 0;
	} else {
		vni = gnvh->vni;
		iph = ip_hdr(skb); /* Still outer IP header... */
		addr = iph->saddr;
	}

	geneve = geneve_lookup(gs, addr, vni);
	if (!geneve)
		goto drop;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;
		void *opts;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;

		/* Update tunnel dst according to Geneve options. */
		opts = ip_tunnel_info_opts(&tun_dst->u.tun_info,
					   gnvh->opt_len * 4);
		memcpy(opts, gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	skb_reset_network_header(skb);

	iph = ip_hdr(skb); /* Now inner IP header... */
	err = IP_ECN_decapsulate(iph, skb);

	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto error;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto error;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto error;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve_rx(gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Let the UDP layer deal with the skb */
	return 1;
}
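/* On-wire layout handled by the receive path above (for reference):
 *
 *   outer IPv4 | outer UDP (dst 6081 by default) | Geneve header | inner Ethernet frame
 *
 * GENEVE_BASE_HLEN covers the outer UDP header plus the fixed Geneve
 * header (8 + 8 bytes); variable-length options add another
 * opt_len * 4 bytes.  geneve_udp_encap_recv() strips everything up to
 * the inner Ethernet header before geneve_rx() looks up the target
 * device by VNI and remote address.
 */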
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&gs->udp_offloads);
		if (err)
			pr_warn("geneve: udp_add_offload failed with status %d\n",
				err);
	}
}

static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}

static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb,
					   struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
			       struct udp_offload *uoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	udp_tunnel_gro_complete(skb, nhoff);

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();
	return err;
}
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	gs->udp_offloads.port = port;
	gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
	geneve_notify_add_rx_port(gs);

	/* Mark socket as an encapsulation socket */
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}

static void geneve_notify_del_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;

	if (sa_family == AF_INET)
		udp_del_offload(&gs->udp_offloads);
}

static void geneve_sock_release(struct geneve_sock *gs)
{
	if (--gs->refcnt)
		return;

	list_del(&gs->list);
	geneve_notify_del_rx_port(gs);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}

static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
			return gs;
		}
	}
	return NULL;
}

static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u32 hash;

	gs = geneve_find_sock(gn, geneve->dst_port);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->dst_port, false);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
	geneve->sock = gs;

	hash = geneve_net_vni_hash(geneve->vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}

static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(gs);
	return 0;
}

static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			    bool csum)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto free_rt;
	}

	skb = udp_tunnel_handle_offloads(skb, csum);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		goto free_rt;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	gnvh->ver = GENEVE_VER;
	gnvh->opt_len = opt_len / 4;
	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	gnvh->rsvd1 = 0;
	memcpy(gnvh->vni, vni, 3);
	gnvh->proto_type = htons(ETH_P_TEB);
	gnvh->rsvd2 = 0;
	memcpy(gnvh->options, opt, opt_len);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_rt:
	ip_rt_put(rt);
	return err;
}
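/* Illustrative arithmetic for the headroom reserved above: with no
 * options the encapsulation adds 8 bytes of Geneve header, 8 bytes of
 * outer UDP and 20 bytes of outer IPv4, i.e. 36 bytes per packet;
 * options add opt_len more bytes (always a multiple of four).  That is
 * exactly the GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr) term in
 * the min_headroom calculation, on top of the link-layer headroom for
 * the outgoing route.
 */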
static struct rtable *geneve_get_rt(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi4 *fl4,
				    struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct rtable *rt = NULL;
	__u8 tos;

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;

	if (info) {
		fl4->daddr = info->key.u.ipv4.dst;
		fl4->saddr = info->key.u.ipv4.src;
		fl4->flowi4_tos = RT_TOS(info->key.tos);
	} else {
		tos = geneve->tos;
		if (tos == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			tos = ip_tunnel_get_dsfield(iip, skb);
		}

		fl4->flowi4_tos = RT_TOS(tos);
		fl4->daddr = geneve->remote.sin_addr.s_addr;
	}

	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		dev->stats.tx_carrier_errors++;
		return rt;
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		dev->stats.collisions++;
		ip_rt_put(rt);
		return ERR_PTR(-EINVAL);
	}
	return rt;
}

/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
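/* Transmit path summary for geneve_xmit() below: look up an outer IPv4
 * route with geneve_get_rt(), pick a source port by hashing the inner
 * flow (udp_flow_src_port()), push the Geneve header with
 * geneve_build_skb(), then hand the packet to udp_tunnel_xmit_skb() for
 * the outer IP/UDP encapsulation.  In collect_md mode the VNI, options,
 * TOS and TTL come from the skb's tunnel metadata rather than from the
 * netdev's own configuration.
 */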
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;
	struct ip_tunnel_info *info = NULL;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	bool udp_csum;
	__be16 df;
	int err;

	if (geneve->collect_md) {
		info = skb_tunnel_info(skb);
		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
		if (info && ip_tunnel_info_af(info) != AF_INET)
			goto tx_error;
	}

	rt = geneve_get_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (key->tun_flags & TUNNEL_GENEVE_OPT)
			opts = ip_tunnel_info_opts(info, info->options_len);

		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, udp_csum);
		if (unlikely(err))
			goto err;

		tos = key->tos;
		ttl = key->ttl;
		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	} else {
		const struct iphdr *iip; /* interior IP header */

		udp_csum = false;
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, udp_csum);
		if (unlikely(err))
			goto err;

		iip = ip_hdr(skb);
		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
			ttl = 1;
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		df = 0;
	}
	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
				  tos, ttl, df, sport, geneve->dst_port,
				  !net_eq(geneve->net, dev_net(geneve->dev)),
				  !udp_csum);

	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);
err:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};

/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features    |= NETIF_F_LLTX;
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features    |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;

	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
			return -ERANGE;
	}

	return 0;
}
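/* Example of how these rtnl attributes are typically exercised from
 * userspace (illustrative only; assumes an iproute2 build with geneve
 * support):
 *
 *   ip link add gnv0 type geneve id 42 remote 192.0.2.1
 *
 * geneve_newlink() below requires both IFLA_GENEVE_ID and
 * IFLA_GENEVE_REMOTE; collect_md (metadata based) devices are instead
 * created by in-kernel callers through geneve_dev_create_fb().
 */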
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  __be16 dst_port,
					  __be32 rem_addr,
					  u8 vni[],
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	t = NULL;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (geneve->dst_port == dst_port) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    rem_addr == geneve->remote.sin_addr.s_addr &&
		    dst_port == geneve->dst_port)
			t = geneve;
	}
	return t;
}

static int geneve_configure(struct net *net, struct net_device *dev,
			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
			    __u16 dst_port, bool metadata)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err;

	if (metadata) {
		if (rem_addr || vni || tos || ttl)
			return -EINVAL;
	}

	geneve->net = net;
	geneve->dev = dev;

	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
	geneve->vni[2] =  vni & 0x000000ff;

	geneve->remote.sin_addr.s_addr = rem_addr;
	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
		return -EINVAL;

	geneve->ttl = ttl;
	geneve->tos = tos;
	geneve->dst_port = htons(dst_port);
	geneve->collect_md = metadata;

	t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni,
			    &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}

static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	__u16 dst_port = GENEVE_UDP_PORT;
	__u8 ttl = 0, tos = 0;
	bool metadata = false;
	__be32 rem_addr;
	__u32 vni;

	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
	rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

	if (data[IFLA_GENEVE_TTL])
		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_PORT])
		dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]);

	if (data[IFLA_GENEVE_COLLECT_METADATA])
		metadata = true;

	return geneve_configure(net, dev, rem_addr, vni,
				ttl, tos, dst_port, metadata);
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__u16)) +	/* IFLA_GENEVE_PORT */
		nla_total_size(0) +	/* IFLA_GENEVE_COLLECT_METADATA */
		0;
}
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	__u32 vni;

	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
			    geneve->remote.sin_addr.s_addr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port)))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};

struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	return dev;
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);

static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}

static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};

static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");