/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))

/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	struct list_head	sock_list;
};

static unsigned int geneve_net_id;

/* Pseudo network device */
struct geneve_dev {
	struct hlist_node  hlist;	/* vni hash table */
	struct net	   *net;	/* netns for packet i/o */
	struct net_device  *dev;	/* netdev for geneve tunnel */
	struct ip_tunnel_info info;
	struct geneve_sock __rcu *sock4;	/* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock __rcu *sock6;	/* IPv6 socket used for geneve tunnel */
#endif
	struct list_head   next;	/* geneve's per namespace list */
	struct gro_cells   gro_cells;
	bool		   collect_md;
	bool		   use_udp6_rx_checksums;
};

struct geneve_sock {
	bool			collect_md;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
};

static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}

static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}

/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}

static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] == tun_id[2]) &&
	       (vni[1] == tun_id[1]) &&
	       (vni[2] == tun_id[0]);
#else
	return !memcmp(vni, &tun_id[5], 3);
#endif
}

static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
	return gs->sock->sk->sk_family;
}

static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
		    addr == geneve->info.key.u.ipv4.dst)
			return geneve;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
					 struct in6_addr addr6, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
		    ipv6_addr_equal(&addr6, &geneve->info.key.u.ipv6.dst))
			return geneve;
	}
	return NULL;
}
#endif

static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}

static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
					    struct sk_buff *skb)
{
	static u8 zero_vni[3];
	u8 *vni;

	if (geneve_get_sk_family(gs) == AF_INET) {
		struct iphdr *iph;
		__be32 addr;

		iph = ip_hdr(skb); /* outer IP header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr = 0;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr = iph->saddr;
		}

		return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (geneve_get_sk_family(gs) == AF_INET6) {
		static struct in6_addr zero_addr6;
		struct ipv6hdr *ip6h;
		struct in6_addr addr6;

		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr6 = zero_addr6;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr6 = ip6h->saddr;
		}

		return geneve6_lookup(gs, addr6, vni);
#endif
	}
	return NULL;
}

/* geneve receive/decap routine */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
		      struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	int err = 0;
	void *oiph;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (geneve_get_sk_family(gs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (geneve_get_sk_family(gs) == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
#endif
		}
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&geneve->gro_cells, skb);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = dst_cache_init(&geneve->info.dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		gro_cells_destroy(&geneve->gro_cells);
		return err;
	}
	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	dst_cache_destroy(&geneve->info.dst_cache);
	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_dev *geneve;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto drop;

	/* Drop packets with an unsupported version or protocol type */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto drop;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve = geneve_lookup_skb(gs, skb);
	if (!geneve)
		goto drop;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB),
				 !net_eq(geneve->net, dev_net(geneve->dev))))
		goto drop;

	geneve_rx(geneve, gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port, bool ipv6_rx_csum)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.ipv6_v6only = 1;
		udp_conf.use_udp6_rx_checksums = ipv6_rx_csum;
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}

static struct sk_buff **geneve_gro_receive(struct sock *sk,
					   struct sk_buff **head,
					   struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
			       int nhoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + gh_len);

	return err;
}

/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6, bool ipv6_rx_csum)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.gro_receive = geneve_gro_receive;
	tunnel_cfg.gro_complete = geneve_gro_complete;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}

static void __geneve_sock_release(struct geneve_sock *gs)
{
	if (!gs || --gs->refcnt)
		return;

	list_del(&gs->list);
	udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}

static void geneve_sock_release(struct geneve_dev *geneve)
{
	struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);

	rcu_assign_pointer(geneve->sock6, NULL);
#endif

	rcu_assign_pointer(geneve->sock4, NULL);
	synchronize_net();

	__geneve_sock_release(gs4);
#if IS_ENABLED(CONFIG_IPV6)
	__geneve_sock_release(gs6);
#endif
}

static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    sa_family_t family,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    geneve_get_sk_family(gs) == family) {
			return gs;
		}
	}
	return NULL;
}

static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u8 vni[3];
	__u32 hash;

	gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET,
			      geneve->info.key.tp_dst);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->info.key.tp_dst, ipv6,
				  geneve->use_udp6_rx_checksums);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		rcu_assign_pointer(geneve->sock6, gs);
	else
#endif
		rcu_assign_pointer(geneve->sock4, gs);

	tunnel_id_to_vni(geneve->info.key.tun_id, vni);
	hash = geneve_net_vni_hash(vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}

static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
	bool metadata = geneve->collect_md;
	int ret = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6 || metadata)
		ret = geneve_sock_add(geneve, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = geneve_sock_add(geneve, false);
	if (ret < 0)
		geneve_sock_release(geneve);

	return ret;
}

static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(geneve);
	return 0;
}

static void geneve_build_header(struct genevehdr *geneveh,
				const struct ip_tunnel_info *info)
{
	geneveh->ver = GENEVE_VER;
	geneveh->opt_len = info->options_len / 4;
	geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
	geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
	geneveh->rsvd1 = 0;
	tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
	geneveh->proto_type = htons(ETH_P_TEB);
	geneveh->rsvd2 = 0;

	ip_tunnel_info_opts_get(geneveh->options, info);
}

static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
			    const struct ip_tunnel_info *info,
			    bool xnet, int ip_hdr_len)
{
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		       GENEVE_BASE_HLEN + info->options_len + ip_hdr_len;
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) +
					      info->options_len);
	geneve_build_header(gnvh, info);
	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}

static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
				       struct net_device *dev,
				       struct flowi4 *fl4,
				       const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_cache *dst_cache;
	struct rtable *rt = NULL;
	__u8 tos;

	if (!rcu_dereference(geneve->sock4))
		return ERR_PTR(-EIO);

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;
	fl4->daddr = info->key.u.ipv4.dst;
	fl4->saddr = info->key.u.ipv4.src;

	tos = info->key.tos;
	if ((tos == 1) && !geneve->collect_md) {
		tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		use_cache = false;
	}
	fl4->flowi4_tos = RT_TOS(tos);

	dst_cache = (struct dst_cache *)&info->dst_cache;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
		if (rt)
			return rt;
	}
	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
	if (use_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
	return rt;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
					   struct net_device *dev,
					   struct flowi6 *fl6,
					   const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_entry *dst = NULL;
	struct dst_cache *dst_cache;
	struct geneve_sock *gs6;
	__u8 prio;

	gs6 = rcu_dereference(geneve->sock6);
	if (!gs6)
		return ERR_PTR(-EIO);

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_proto = IPPROTO_UDP;
	fl6->daddr = info->key.u.ipv6.dst;
	fl6->saddr = info->key.u.ipv6.src;
	prio = info->key.tos;
	if ((prio == 1) && !geneve->collect_md) {
		prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		use_cache = false;
	}

	fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
					   info->key.label);
	dst_cache = (struct dst_cache *)&info->dst_cache;
	if (use_cache) {
		dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
		if (dst)
			return dst;
	}
	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (dst->dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
		dst_release(dst);
		return ERR_PTR(-ELOOP);
	}

	if (use_cache)
		dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
	return dst;
}
#endif

static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			   struct geneve_dev *geneve,
			   const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	__be16 df;
	int err;

	rt = geneve_get_v4_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	if (geneve->collect_md) {
		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
		ttl = key->ttl;
	} else {
		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
		ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst);
	}
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
			    tos, ttl, df, sport, geneve->info.key.tp_dst,
			    !net_eq(geneve->net, dev_net(geneve->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct geneve_dev *geneve,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	dst = geneve_get_v6_dst(skb, dev, &fl6, info);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	if (geneve->collect_md) {
		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
		ttl = key->ttl;
	} else {
		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
					   ip_hdr(skb), skb);
		ttl = key->ttl ? : ip6_dst_hoplimit(dst);
	}
	err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
			     &fl6.saddr, &fl6.daddr, prio, ttl,
			     info->key.label, sport, geneve->info.key.tp_dst,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;
}
#endif

static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (geneve->collect_md) {
		info = skb_tunnel_info(skb);
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			err = -EINVAL;
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
	} else {
		info = &geneve->info;
	}

	rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
	if (info->mode & IP_TUNNEL_INFO_IPV6)
		err = geneve6_xmit_skb(skb, dev, geneve, info);
	else
#endif
		err = geneve_xmit_skb(skb, dev, geneve, info);
	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Only possible if called internally, ndo_change_mtu path's new_mtu
	 * is guaranteed to be between dev->min_mtu and dev->max_mtu.
	 */
	if (new_mtu > dev->max_mtu)
		new_mtu = dev->max_mtu;

	dev->mtu = new_mtu;
	return 0;
}

static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct geneve_dev *geneve = netdev_priv(dev);

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		struct flowi4 fl4;

		rt = geneve_get_v4_rt(skb, dev, &fl4, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = fl4.saddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct flowi6 fl6;

		dst = geneve_get_v6_dst(skb, dev, &fl6, info);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = fl6.saddr;
#endif
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(geneve->net, skb,
					     1, USHRT_MAX, true);
	info->key.tp_dst = geneve->info.key.tp_dst;
	return 0;
}

static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= geneve_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};

/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening GENEVE udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void geneve_push_rx_ports(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;

	rcu_read_lock();
	list_for_each_entry_rcu(gs, &gn->sock_list, list)
		udp_tunnel_push_rx_port(dev, gs->sock,
					UDP_TUNNEL_TYPE_GENEVE);
	rcu_read_unlock();
}

/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	/* MTU range: 68 - (something less than 65535) */
	dev->min_mtu = ETH_MIN_MTU;
	/* The max_mtu calculation does not take account of GENEVE
	 * options, to avoid excluding potentially valid
	 * configurations. This will be further reduced by IPvX hdr size.
	 */
	dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

	netif_keep_dst(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_REMOTE6]		= { .len = sizeof(struct in6_addr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_LABEL]		= { .type = NLA_U32 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GENEVE_UDP_CSUM]		= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
};

static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
			return -ERANGE;
	}

	return 0;
}

static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  const struct ip_tunnel_info *info,
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t = NULL;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (info->key.tp_dst == geneve->info.key.tp_dst) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (info->key.tun_id == geneve->info.key.tun_id &&
		    info->key.tp_dst == geneve->info.key.tp_dst &&
		    !memcmp(&info->key.u, &geneve->info.key.u, sizeof(info->key.u)))
			t = geneve;
	}
	return t;
}

static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;
	return true;
}

static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
	if (info->key.tun_id || info->key.tun_flags || info->key.tos ||
	    info->key.ttl || info->key.label || info->key.tp_src ||
	    !is_all_zero((const u8 *)&info->key.u, sizeof(info->key.u)))
		return false;
	else
		return true;
}

static int geneve_configure(struct net *net, struct net_device *dev,
			    const struct ip_tunnel_info *info,
			    bool metadata, bool ipv6_rx_csum)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err, encap_len;

	if (metadata && !is_tnl_info_zero(info))
		return -EINVAL;

	geneve->net = net;
	geneve->dev = dev;

	t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	/* make enough headroom for basic scenario */
	encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
	if (ip_tunnel_info_af(info) == AF_INET) {
		encap_len += sizeof(struct iphdr);
		dev->max_mtu -= sizeof(struct iphdr);
	} else {
		encap_len += sizeof(struct ipv6hdr);
		dev->max_mtu -= sizeof(struct ipv6hdr);
	}
	dev->needed_headroom = encap_len + ETH_HLEN;

	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

	dst_cache_reset(&geneve->info.dst_cache);
	geneve->info = *info;
	geneve->collect_md = metadata;
	geneve->use_udp6_rx_checksums = ipv6_rx_csum;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}

static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
{
	memset(info, 0, sizeof(*info));
	info->key.tp_dst = htons(dst_port);
}

static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	bool use_udp6_rx_checksums = false;
	struct ip_tunnel_info info;
	bool metadata = false;

	init_tnl_info(&info, GENEVE_UDP_PORT);

	if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
		return -EINVAL;

	if (data[IFLA_GENEVE_REMOTE]) {
		info.key.u.ipv4.dst =
			nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

		if (IN_MULTICAST(ntohl(info.key.u.ipv4.dst))) {
			netdev_dbg(dev, "multicast remote is unsupported\n");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_REMOTE6]) {
#if IS_ENABLED(CONFIG_IPV6)
		info.mode = IP_TUNNEL_INFO_IPV6;
		info.key.u.ipv6.dst =
			nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);

		if (ipv6_addr_type(&info.key.u.ipv6.dst) &
		    IPV6_ADDR_LINKLOCAL) {
			netdev_dbg(dev, "link-local remote is unsupported\n");
			return -EINVAL;
		}
		if (ipv6_addr_is_multicast(&info.key.u.ipv6.dst)) {
			netdev_dbg(dev, "multicast remote is unsupported\n");
			return -EINVAL;
		}
		info.key.tun_flags |= TUNNEL_CSUM;
		use_udp6_rx_checksums = true;
#else
		return -EPFNOSUPPORT;
#endif
	}

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni;
		__u8 tvni[3];

		vni = nla_get_u32(data[IFLA_GENEVE_ID]);
		tvni[0] = (vni & 0x00ff0000) >> 16;
		tvni[1] = (vni & 0x0000ff00) >> 8;
		tvni[2] =  vni & 0x000000ff;

		info.key.tun_id = vni_to_tunnel_id(tvni);
	}
	if (data[IFLA_GENEVE_TTL])
		info.key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		info.key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_LABEL]) {
		info.key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
				 IPV6_FLOWLABEL_MASK;
		if (info.key.label && (!(info.mode & IP_TUNNEL_INFO_IPV6)))
			return -EINVAL;
	}

	if (data[IFLA_GENEVE_PORT])
		info.key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);

	if (data[IFLA_GENEVE_COLLECT_METADATA])
		metadata = true;

	if (data[IFLA_GENEVE_UDP_CSUM] &&
	    !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
		info.key.tun_flags |= TUNNEL_CSUM;

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
		info.key.tun_flags &= ~TUNNEL_CSUM;

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
		use_udp6_rx_checksums = false;

	return geneve_configure(net, dev, &info, metadata, use_udp6_rx_checksums);
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */
		nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
		0;
}

static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct ip_tunnel_info *info = &geneve->info;
	__u8 tmp_vni[3];
	__u32 vni;

	tunnel_id_to_vni(info->key.tun_id, tmp_vni);
	vni = (tmp_vni[0] << 16) | (tmp_vni[1] << 8) | tmp_vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (ip_tunnel_info_af(info) == AF_INET) {
		if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
				    info->key.u.ipv4.dst))
			goto nla_put_failure;

		if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
			       !!(info->key.tun_flags & TUNNEL_CSUM)))
			goto nla_put_failure;

#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
				     &info->key.u.ipv6.dst))
			goto nla_put_failure;

		if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
			       !(info->key.tun_flags & TUNNEL_CSUM)))
			goto nla_put_failure;

		if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
			       !geneve->use_udp6_rx_checksums))
			goto nla_put_failure;
#endif
	}

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
	    nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
		goto nla_put_failure;

	if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};

struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct ip_tunnel_info info;
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	init_tnl_info(&info, dst_port);
	err = geneve_configure(net, dev, &info, true, true);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = geneve_change_mtu(dev, IP_MAX_MTU);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;
err:
	geneve_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);

static int geneve_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		geneve_push_rx_ports(dev);

	return NOTIFY_DONE;
}

static struct notifier_block geneve_notifier_block __read_mostly = {
	.notifier_call = geneve_netdevice_event,
};

static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}

static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};

static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&geneve_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&geneve_notifier_block);
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_netdevice_notifier(&geneve_notifier_block);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");