/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))

/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	struct list_head	sock_list;
};

static int geneve_net_id;

/* Pseudo network device */
struct geneve_dev {
	struct hlist_node	hlist;		/* vni hash table */
	struct net		*net;		/* netns for packet i/o */
	struct net_device	*dev;		/* netdev for geneve tunnel */
	struct geneve_sock	*sock;		/* socket used for geneve tunnel */
	u8			vni[3];		/* virtual network ID for tunnel */
	u8			ttl;		/* TTL override */
	u8			tos;		/* TOS override */
	struct sockaddr_in	remote;		/* IPv4 address for link partner */
	struct list_head	next;		/* geneve's per namespace list */
	__be16			dst_port;
	bool			collect_md;
	struct gro_cells	gro_cells;
};

struct geneve_sock {
	bool			collect_md;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct udp_offload	udp_offloads;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
};

static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}

static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
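
/* Note on vni_to_tunnel_id() above (and its inverse, tunnel_id_to_vni()
 * further down): the 24-bit VNI ends up in the three low-order bytes of the
 * big-endian 64-bit tunnel id, i.e. the result is equivalent to cpu_to_be64()
 * of the VNI taken as an integer; the per-endianness byte shuffling just
 * avoids a full 64-bit byte swap.
 */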

static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    addr == geneve->remote.sin_addr.s_addr)
			return geneve;
	}
	return NULL;
}

static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}

/* geneve receive/decap routine */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct geneve_dev *geneve = NULL;
	struct pcpu_sw_netstats *stats;
	struct iphdr *iph;
	u8 *vni;
	__be32 addr;
	int err;

	iph = ip_hdr(skb); /* outer IP header... */

	if (gs->collect_md) {
		static u8 zero_vni[3];

		vni = zero_vni;
		addr = 0;
	} else {
		vni = gnvh->vni;
		addr = iph->saddr;
	}

	geneve = geneve_lookup(gs, addr, vni);
	if (!geneve)
		goto drop;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);

	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&geneve->gro_cells, skb);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto error;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto error;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto error;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve_rx(gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Let the UDP layer deal with the skb */
	return 1;
}

static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&gs->udp_offloads);
		if (err)
			pr_warn("geneve: udp_add_offload failed with status %d\n",
				err);
	}
}

static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}

static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb,
					   struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
			       struct udp_offload *uoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	udp_tunnel_gro_complete(skb, nhoff);

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();
	return err;
}
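
/* Sockets are shared between geneve devices bound to the same destination
 * UDP port: geneve_open() reuses an existing geneve_sock and bumps its
 * refcnt, otherwise geneve_socket_create() below opens a new one, and
 * geneve_sock_release() drops the reference again from geneve_stop().
 */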

/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	gs->udp_offloads.port = port;
	gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
	geneve_notify_add_rx_port(gs);

	/* Mark socket as an encapsulation socket */
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}

static void geneve_notify_del_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;

	if (sa_family == AF_INET)
		udp_del_offload(&gs->udp_offloads);
}

static void geneve_sock_release(struct geneve_sock *gs)
{
	if (--gs->refcnt)
		return;

	list_del(&gs->list);
	geneve_notify_del_rx_port(gs);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}

static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
			return gs;
		}
	}
	return NULL;
}

static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u32 hash;

	gs = geneve_find_sock(gn, geneve->dst_port);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->dst_port, false);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
	geneve->sock = gs;

	hash = geneve_net_vni_hash(geneve->vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}

static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(gs);
	return 0;
}
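
/* geneve_build_skb() pushes the Geneve header in front of the inner Ethernet
 * frame. opt_len is given in bytes here and is stored in the header in
 * 4-byte words; on failure the skb has already been consumed and the route
 * reference is dropped before the error is returned.
 */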
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			    bool csum)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto free_rt;
	}

	skb = udp_tunnel_handle_offloads(skb, csum);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		goto free_rt;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	gnvh->ver = GENEVE_VER;
	gnvh->opt_len = opt_len / 4;
	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	gnvh->rsvd1 = 0;
	memcpy(gnvh->vni, vni, 3);
	gnvh->proto_type = htons(ETH_P_TEB);
	gnvh->rsvd2 = 0;
	memcpy(gnvh->options, opt, opt_len);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_rt:
	ip_rt_put(rt);
	return err;
}

static struct rtable *geneve_get_rt(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi4 *fl4,
				    struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct rtable *rt = NULL;
	__u8 tos;

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;

	if (info) {
		fl4->daddr = info->key.u.ipv4.dst;
		fl4->saddr = info->key.u.ipv4.src;
		fl4->flowi4_tos = RT_TOS(info->key.tos);
	} else {
		tos = geneve->tos;
		if (tos == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			tos = ip_tunnel_get_dsfield(iip, skb);
		}

		fl4->flowi4_tos = RT_TOS(tos);
		fl4->daddr = geneve->remote.sin_addr.s_addr;
	}

	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		dev->stats.tx_carrier_errors++;
		return rt;
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		dev->stats.collisions++;
		ip_rt_put(rt);
		return ERR_PTR(-EINVAL);
	}
	return rt;
}

/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
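
/* Encapsulate and transmit a frame. In collect_md mode the per-packet tunnel
 * parameters (remote address, VNI, TOS/TTL, options) come from the skb's
 * tunnel metadata dst; otherwise the values configured on the geneve device
 * itself are used.
 */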
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;
	struct ip_tunnel_info *info = NULL;
	struct rtable *rt = NULL;
	const struct iphdr *iip; /* interior IP header */
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	bool udp_csum;
	__be16 df;
	int err;

	if (geneve->collect_md) {
		info = skb_tunnel_info(skb);
		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
		if (info && ip_tunnel_info_af(info) != AF_INET)
			goto tx_error;
	}

	rt = geneve_get_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (key->tun_flags & TUNNEL_GENEVE_OPT)
			opts = ip_tunnel_info_opts(info);

		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, udp_csum);
		if (unlikely(err))
			goto err;

		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	} else {
		udp_csum = false;
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, udp_csum);
		if (unlikely(err))
			goto err;

		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
			ttl = 1;
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		df = 0;
	}
	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
				  tos, ttl, df, sport, geneve->dst_port,
				  !net_eq(geneve->net, dev_net(geneve->dev)),
				  !udp_csum);

	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);
err:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};

/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};
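
/* These attributes are typically set from user space with iproute2, e.g.
 * (a sketch; exact option names depend on the installed iproute2 version):
 *
 *   ip link add geneve0 type geneve id 42 remote 192.0.2.1
 *
 * which reaches geneve_newlink() below as IFLA_GENEVE_ID and
 * IFLA_GENEVE_REMOTE.
 */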

static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
			return -ERANGE;
	}

	return 0;
}

static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  __be16 dst_port,
					  __be32 rem_addr,
					  u8 vni[],
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	t = NULL;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (geneve->dst_port == dst_port) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    rem_addr == geneve->remote.sin_addr.s_addr &&
		    dst_port == geneve->dst_port)
			t = geneve;
	}
	return t;
}

static int geneve_configure(struct net *net, struct net_device *dev,
			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
			    __be16 dst_port, bool metadata)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err;

	if (metadata) {
		if (rem_addr || vni || tos || ttl)
			return -EINVAL;
	}

	geneve->net = net;
	geneve->dev = dev;

	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
	geneve->vni[2] = vni & 0x000000ff;

	geneve->remote.sin_addr.s_addr = rem_addr;
	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
		return -EINVAL;

	geneve->ttl = ttl;
	geneve->tos = tos;
	geneve->dst_port = dst_port;
	geneve->collect_md = metadata;

	t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
			    &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}

static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	__be16 dst_port = htons(GENEVE_UDP_PORT);
	__u8 ttl = 0, tos = 0;
	bool metadata = false;
	__be32 rem_addr;
	__u32 vni;

	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
	rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

	if (data[IFLA_GENEVE_TTL])
		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_PORT])
		dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);

	if (data[IFLA_GENEVE_COLLECT_METADATA])
		metadata = true;

	return geneve_configure(net, dev, rem_addr, vni,
				ttl, tos, dst_port, metadata);
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +		/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
		nla_total_size(sizeof(__u8)) +		/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +		/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be16)) +	/* IFLA_GENEVE_PORT */
		nla_total_size(0) +			/* IFLA_GENEVE_COLLECT_METADATA */
		0;
}

static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	__u32 vni;

	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
			    geneve->remote.sin_addr.s_addr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
		goto nla_put_failure;

	if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};

struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	return dev;
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);

static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}

static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};

static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");