1 /* 2 * net/drivers/team/team.c - Network team device driver 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 */ 10 11 #include <linux/kernel.h> 12 #include <linux/types.h> 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/rcupdate.h> 17 #include <linux/errno.h> 18 #include <linux/ctype.h> 19 #include <linux/notifier.h> 20 #include <linux/netdevice.h> 21 #include <linux/if_vlan.h> 22 #include <linux/if_arp.h> 23 #include <linux/socket.h> 24 #include <linux/etherdevice.h> 25 #include <linux/rtnetlink.h> 26 #include <net/rtnetlink.h> 27 #include <net/genetlink.h> 28 #include <net/netlink.h> 29 #include <linux/if_team.h> 30 31 #define DRV_NAME "team" 32 33 34 /********** 35 * Helpers 36 **********/ 37 38 #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) 39 40 static struct team_port *team_port_get_rcu(const struct net_device *dev) 41 { 42 struct team_port *port = rcu_dereference(dev->rx_handler_data); 43 44 return team_port_exists(dev) ? port : NULL; 45 } 46 47 static struct team_port *team_port_get_rtnl(const struct net_device *dev) 48 { 49 struct team_port *port = rtnl_dereference(dev->rx_handler_data); 50 51 return team_port_exists(dev) ? port : NULL; 52 } 53 54 /* 55 * Since the ability to change mac address for open port device is tested in 56 * team_port_add, this function can be called without control of return value 57 */ 58 static int __set_port_mac(struct net_device *port_dev, 59 const unsigned char *dev_addr) 60 { 61 struct sockaddr addr; 62 63 memcpy(addr.sa_data, dev_addr, ETH_ALEN); 64 addr.sa_family = ARPHRD_ETHER; 65 return dev_set_mac_address(port_dev, &addr); 66 } 67 68 static int team_port_set_orig_mac(struct team_port *port) 69 { 70 return __set_port_mac(port->dev, port->orig.dev_addr); 71 } 72 73 int team_port_set_team_mac(struct team_port *port) 74 { 75 return __set_port_mac(port->dev, port->team->dev->dev_addr); 76 } 77 EXPORT_SYMBOL(team_port_set_team_mac); 78 79 static void team_refresh_port_linkup(struct team_port *port) 80 { 81 port->linkup = port->user.linkup_enabled ? port->user.linkup : 82 port->state.linkup; 83 } 84 85 /******************* 86 * Options handling 87 *******************/ 88 89 struct team_option_inst { /* One for each option instance */ 90 struct list_head list; 91 struct team_option *option; 92 struct team_port *port; /* != NULL if per-port */ 93 bool changed; 94 bool removed; 95 }; 96 97 static struct team_option *__team_find_option(struct team *team, 98 const char *opt_name) 99 { 100 struct team_option *option; 101 102 list_for_each_entry(option, &team->option_list, list) { 103 if (strcmp(option->name, opt_name) == 0) 104 return option; 105 } 106 return NULL; 107 } 108 109 static int __team_option_inst_add(struct team *team, struct team_option *option, 110 struct team_port *port) 111 { 112 struct team_option_inst *opt_inst; 113 114 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL); 115 if (!opt_inst) 116 return -ENOMEM; 117 opt_inst->option = option; 118 opt_inst->port = port; 119 opt_inst->changed = true; 120 opt_inst->removed = false; 121 list_add_tail(&opt_inst->list, &team->option_inst_list); 122 return 0; 123 } 124 125 static void __team_option_inst_del(struct team_option_inst *opt_inst) 126 { 127 list_del(&opt_inst->list); 128 kfree(opt_inst); 129 } 130 131 static void __team_option_inst_del_option(struct team *team, 132 struct team_option *option) 133 { 134 struct team_option_inst *opt_inst, *tmp; 135 136 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { 137 if (opt_inst->option == option) 138 __team_option_inst_del(opt_inst); 139 } 140 } 141 142 static int __team_option_inst_add_option(struct team *team, 143 struct team_option *option) 144 { 145 struct team_port *port; 146 int err; 147 148 if (!option->per_port) 149 return __team_option_inst_add(team, option, 0); 150 151 list_for_each_entry(port, &team->port_list, list) { 152 err = __team_option_inst_add(team, option, port); 153 if (err) 154 goto inst_del_option; 155 } 156 return 0; 157 158 inst_del_option: 159 __team_option_inst_del_option(team, option); 160 return err; 161 } 162 163 static void __team_option_inst_mark_removed_option(struct team *team, 164 struct team_option *option) 165 { 166 struct team_option_inst *opt_inst; 167 168 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 169 if (opt_inst->option == option) { 170 opt_inst->changed = true; 171 opt_inst->removed = true; 172 } 173 } 174 } 175 176 static void __team_option_inst_del_port(struct team *team, 177 struct team_port *port) 178 { 179 struct team_option_inst *opt_inst, *tmp; 180 181 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { 182 if (opt_inst->option->per_port && 183 opt_inst->port == port) 184 __team_option_inst_del(opt_inst); 185 } 186 } 187 188 static int __team_option_inst_add_port(struct team *team, 189 struct team_port *port) 190 { 191 struct team_option *option; 192 int err; 193 194 list_for_each_entry(option, &team->option_list, list) { 195 if (!option->per_port) 196 continue; 197 err = __team_option_inst_add(team, option, port); 198 if (err) 199 goto inst_del_port; 200 } 201 return 0; 202 203 inst_del_port: 204 __team_option_inst_del_port(team, port); 205 return err; 206 } 207 208 static void __team_option_inst_mark_removed_port(struct team *team, 209 struct team_port *port) 210 { 211 struct team_option_inst *opt_inst; 212 213 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 214 if (opt_inst->port == port) { 215 opt_inst->changed = true; 216 opt_inst->removed = true; 217 } 218 } 219 } 220 221 static int __team_options_register(struct team *team, 222 const struct team_option *option, 223 size_t option_count) 224 { 225 int i; 226 struct team_option **dst_opts; 227 int err; 228 229 dst_opts = kzalloc(sizeof(struct team_option *) * option_count, 230 GFP_KERNEL); 231 if (!dst_opts) 232 return -ENOMEM; 233 for (i = 0; i < option_count; i++, option++) { 234 if (__team_find_option(team, option->name)) { 235 err = -EEXIST; 236 goto alloc_rollback; 237 } 238 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); 239 if (!dst_opts[i]) { 240 err = -ENOMEM; 241 goto alloc_rollback; 242 } 243 } 244 245 for (i = 0; i < option_count; i++) { 246 err = __team_option_inst_add_option(team, dst_opts[i]); 247 if (err) 248 goto inst_rollback; 249 list_add_tail(&dst_opts[i]->list, &team->option_list); 250 } 251 252 kfree(dst_opts); 253 return 0; 254 255 inst_rollback: 256 for (i--; i >= 0; i--) 257 __team_option_inst_del_option(team, dst_opts[i]); 258 259 i = option_count - 1; 260 alloc_rollback: 261 for (i--; i >= 0; i--) 262 kfree(dst_opts[i]); 263 264 kfree(dst_opts); 265 return err; 266 } 267 268 static void __team_options_mark_removed(struct team *team, 269 const struct team_option *option, 270 size_t option_count) 271 { 272 int i; 273 274 for (i = 0; i < option_count; i++, option++) { 275 struct team_option *del_opt; 276 277 del_opt = __team_find_option(team, option->name); 278 if (del_opt) 279 __team_option_inst_mark_removed_option(team, del_opt); 280 } 281 } 282 283 static void __team_options_unregister(struct team *team, 284 const struct team_option *option, 285 size_t option_count) 286 { 287 int i; 288 289 for (i = 0; i < option_count; i++, option++) { 290 struct team_option *del_opt; 291 292 del_opt = __team_find_option(team, option->name); 293 if (del_opt) { 294 __team_option_inst_del_option(team, del_opt); 295 list_del(&del_opt->list); 296 kfree(del_opt); 297 } 298 } 299 } 300 301 static void __team_options_change_check(struct team *team); 302 303 int team_options_register(struct team *team, 304 const struct team_option *option, 305 size_t option_count) 306 { 307 int err; 308 309 err = __team_options_register(team, option, option_count); 310 if (err) 311 return err; 312 __team_options_change_check(team); 313 return 0; 314 } 315 EXPORT_SYMBOL(team_options_register); 316 317 void team_options_unregister(struct team *team, 318 const struct team_option *option, 319 size_t option_count) 320 { 321 __team_options_mark_removed(team, option, option_count); 322 __team_options_change_check(team); 323 __team_options_unregister(team, option, option_count); 324 } 325 EXPORT_SYMBOL(team_options_unregister); 326 327 static int team_option_port_add(struct team *team, struct team_port *port) 328 { 329 int err; 330 331 err = __team_option_inst_add_port(team, port); 332 if (err) 333 return err; 334 __team_options_change_check(team); 335 return 0; 336 } 337 338 static void team_option_port_del(struct team *team, struct team_port *port) 339 { 340 __team_option_inst_mark_removed_port(team, port); 341 __team_options_change_check(team); 342 __team_option_inst_del_port(team, port); 343 } 344 345 static int team_option_get(struct team *team, 346 struct team_option_inst *opt_inst, 347 struct team_gsetter_ctx *ctx) 348 { 349 return opt_inst->option->getter(team, ctx); 350 } 351 352 static int team_option_set(struct team *team, 353 struct team_option_inst *opt_inst, 354 struct team_gsetter_ctx *ctx) 355 { 356 int err; 357 358 err = opt_inst->option->setter(team, ctx); 359 if (err) 360 return err; 361 362 opt_inst->changed = true; 363 __team_options_change_check(team); 364 return err; 365 } 366 367 /**************** 368 * Mode handling 369 ****************/ 370 371 static LIST_HEAD(mode_list); 372 static DEFINE_SPINLOCK(mode_list_lock); 373 374 static struct team_mode *__find_mode(const char *kind) 375 { 376 struct team_mode *mode; 377 378 list_for_each_entry(mode, &mode_list, list) { 379 if (strcmp(mode->kind, kind) == 0) 380 return mode; 381 } 382 return NULL; 383 } 384 385 static bool is_good_mode_name(const char *name) 386 { 387 while (*name != '\0') { 388 if (!isalpha(*name) && !isdigit(*name) && *name != '_') 389 return false; 390 name++; 391 } 392 return true; 393 } 394 395 int team_mode_register(struct team_mode *mode) 396 { 397 int err = 0; 398 399 if (!is_good_mode_name(mode->kind) || 400 mode->priv_size > TEAM_MODE_PRIV_SIZE) 401 return -EINVAL; 402 spin_lock(&mode_list_lock); 403 if (__find_mode(mode->kind)) { 404 err = -EEXIST; 405 goto unlock; 406 } 407 list_add_tail(&mode->list, &mode_list); 408 unlock: 409 spin_unlock(&mode_list_lock); 410 return err; 411 } 412 EXPORT_SYMBOL(team_mode_register); 413 414 int team_mode_unregister(struct team_mode *mode) 415 { 416 spin_lock(&mode_list_lock); 417 list_del_init(&mode->list); 418 spin_unlock(&mode_list_lock); 419 return 0; 420 } 421 EXPORT_SYMBOL(team_mode_unregister); 422 423 static struct team_mode *team_mode_get(const char *kind) 424 { 425 struct team_mode *mode; 426 427 spin_lock(&mode_list_lock); 428 mode = __find_mode(kind); 429 if (!mode) { 430 spin_unlock(&mode_list_lock); 431 request_module("team-mode-%s", kind); 432 spin_lock(&mode_list_lock); 433 mode = __find_mode(kind); 434 } 435 if (mode) 436 if (!try_module_get(mode->owner)) 437 mode = NULL; 438 439 spin_unlock(&mode_list_lock); 440 return mode; 441 } 442 443 static void team_mode_put(const struct team_mode *mode) 444 { 445 module_put(mode->owner); 446 } 447 448 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) 449 { 450 dev_kfree_skb_any(skb); 451 return false; 452 } 453 454 rx_handler_result_t team_dummy_receive(struct team *team, 455 struct team_port *port, 456 struct sk_buff *skb) 457 { 458 return RX_HANDLER_ANOTHER; 459 } 460 461 static void team_adjust_ops(struct team *team) 462 { 463 /* 464 * To avoid checks in rx/tx skb paths, ensure here that non-null and 465 * correct ops are always set. 466 */ 467 468 if (list_empty(&team->port_list) || 469 !team->mode || !team->mode->ops->transmit) 470 team->ops.transmit = team_dummy_transmit; 471 else 472 team->ops.transmit = team->mode->ops->transmit; 473 474 if (list_empty(&team->port_list) || 475 !team->mode || !team->mode->ops->receive) 476 team->ops.receive = team_dummy_receive; 477 else 478 team->ops.receive = team->mode->ops->receive; 479 } 480 481 /* 482 * We can benefit from the fact that it's ensured no port is present 483 * at the time of mode change. Therefore no packets are in fly so there's no 484 * need to set mode operations in any special way. 485 */ 486 static int __team_change_mode(struct team *team, 487 const struct team_mode *new_mode) 488 { 489 /* Check if mode was previously set and do cleanup if so */ 490 if (team->mode) { 491 void (*exit_op)(struct team *team) = team->ops.exit; 492 493 /* Clear ops area so no callback is called any longer */ 494 memset(&team->ops, 0, sizeof(struct team_mode_ops)); 495 team_adjust_ops(team); 496 497 if (exit_op) 498 exit_op(team); 499 team_mode_put(team->mode); 500 team->mode = NULL; 501 /* zero private data area */ 502 memset(&team->mode_priv, 0, 503 sizeof(struct team) - offsetof(struct team, mode_priv)); 504 } 505 506 if (!new_mode) 507 return 0; 508 509 if (new_mode->ops->init) { 510 int err; 511 512 err = new_mode->ops->init(team); 513 if (err) 514 return err; 515 } 516 517 team->mode = new_mode; 518 memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops)); 519 team_adjust_ops(team); 520 521 return 0; 522 } 523 524 static int team_change_mode(struct team *team, const char *kind) 525 { 526 struct team_mode *new_mode; 527 struct net_device *dev = team->dev; 528 int err; 529 530 if (!list_empty(&team->port_list)) { 531 netdev_err(dev, "No ports can be present during mode change\n"); 532 return -EBUSY; 533 } 534 535 if (team->mode && strcmp(team->mode->kind, kind) == 0) { 536 netdev_err(dev, "Unable to change to the same mode the team is in\n"); 537 return -EINVAL; 538 } 539 540 new_mode = team_mode_get(kind); 541 if (!new_mode) { 542 netdev_err(dev, "Mode \"%s\" not found\n", kind); 543 return -EINVAL; 544 } 545 546 err = __team_change_mode(team, new_mode); 547 if (err) { 548 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind); 549 team_mode_put(new_mode); 550 return err; 551 } 552 553 netdev_info(dev, "Mode changed to \"%s\"\n", kind); 554 return 0; 555 } 556 557 558 /************************ 559 * Rx path frame handler 560 ************************/ 561 562 static bool team_port_enabled(struct team_port *port); 563 564 /* note: already called with rcu_read_lock */ 565 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) 566 { 567 struct sk_buff *skb = *pskb; 568 struct team_port *port; 569 struct team *team; 570 rx_handler_result_t res; 571 572 skb = skb_share_check(skb, GFP_ATOMIC); 573 if (!skb) 574 return RX_HANDLER_CONSUMED; 575 576 *pskb = skb; 577 578 port = team_port_get_rcu(skb->dev); 579 team = port->team; 580 if (!team_port_enabled(port)) { 581 /* allow exact match delivery for disabled ports */ 582 res = RX_HANDLER_EXACT; 583 } else { 584 res = team->ops.receive(team, port, skb); 585 } 586 if (res == RX_HANDLER_ANOTHER) { 587 struct team_pcpu_stats *pcpu_stats; 588 589 pcpu_stats = this_cpu_ptr(team->pcpu_stats); 590 u64_stats_update_begin(&pcpu_stats->syncp); 591 pcpu_stats->rx_packets++; 592 pcpu_stats->rx_bytes += skb->len; 593 if (skb->pkt_type == PACKET_MULTICAST) 594 pcpu_stats->rx_multicast++; 595 u64_stats_update_end(&pcpu_stats->syncp); 596 597 skb->dev = team->dev; 598 } else { 599 this_cpu_inc(team->pcpu_stats->rx_dropped); 600 } 601 602 return res; 603 } 604 605 606 /**************** 607 * Port handling 608 ****************/ 609 610 static bool team_port_find(const struct team *team, 611 const struct team_port *port) 612 { 613 struct team_port *cur; 614 615 list_for_each_entry(cur, &team->port_list, list) 616 if (cur == port) 617 return true; 618 return false; 619 } 620 621 static bool team_port_enabled(struct team_port *port) 622 { 623 return port->index != -1; 624 } 625 626 /* 627 * Enable/disable port by adding to enabled port hashlist and setting 628 * port->index (Might be racy so reader could see incorrect ifindex when 629 * processing a flying packet, but that is not a problem). Write guarded 630 * by team->lock. 631 */ 632 static void team_port_enable(struct team *team, 633 struct team_port *port) 634 { 635 if (team_port_enabled(port)) 636 return; 637 port->index = team->en_port_count++; 638 hlist_add_head_rcu(&port->hlist, 639 team_port_index_hash(team, port->index)); 640 } 641 642 static void __reconstruct_port_hlist(struct team *team, int rm_index) 643 { 644 int i; 645 struct team_port *port; 646 647 for (i = rm_index + 1; i < team->en_port_count; i++) { 648 port = team_get_port_by_index(team, i); 649 hlist_del_rcu(&port->hlist); 650 port->index--; 651 hlist_add_head_rcu(&port->hlist, 652 team_port_index_hash(team, port->index)); 653 } 654 } 655 656 static void team_port_disable(struct team *team, 657 struct team_port *port) 658 { 659 int rm_index = port->index; 660 661 if (!team_port_enabled(port)) 662 return; 663 hlist_del_rcu(&port->hlist); 664 __reconstruct_port_hlist(team, rm_index); 665 team->en_port_count--; 666 port->index = -1; 667 } 668 669 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 670 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ 671 NETIF_F_HIGHDMA | NETIF_F_LRO) 672 673 static void __team_compute_features(struct team *team) 674 { 675 struct team_port *port; 676 u32 vlan_features = TEAM_VLAN_FEATURES; 677 unsigned short max_hard_header_len = ETH_HLEN; 678 679 list_for_each_entry(port, &team->port_list, list) { 680 vlan_features = netdev_increment_features(vlan_features, 681 port->dev->vlan_features, 682 TEAM_VLAN_FEATURES); 683 684 if (port->dev->hard_header_len > max_hard_header_len) 685 max_hard_header_len = port->dev->hard_header_len; 686 } 687 688 team->dev->vlan_features = vlan_features; 689 team->dev->hard_header_len = max_hard_header_len; 690 691 netdev_change_features(team->dev); 692 } 693 694 static void team_compute_features(struct team *team) 695 { 696 mutex_lock(&team->lock); 697 __team_compute_features(team); 698 mutex_unlock(&team->lock); 699 } 700 701 static int team_port_enter(struct team *team, struct team_port *port) 702 { 703 int err = 0; 704 705 dev_hold(team->dev); 706 port->dev->priv_flags |= IFF_TEAM_PORT; 707 if (team->ops.port_enter) { 708 err = team->ops.port_enter(team, port); 709 if (err) { 710 netdev_err(team->dev, "Device %s failed to enter team mode\n", 711 port->dev->name); 712 goto err_port_enter; 713 } 714 } 715 716 return 0; 717 718 err_port_enter: 719 port->dev->priv_flags &= ~IFF_TEAM_PORT; 720 dev_put(team->dev); 721 722 return err; 723 } 724 725 static void team_port_leave(struct team *team, struct team_port *port) 726 { 727 if (team->ops.port_leave) 728 team->ops.port_leave(team, port); 729 port->dev->priv_flags &= ~IFF_TEAM_PORT; 730 dev_put(team->dev); 731 } 732 733 static void __team_port_change_check(struct team_port *port, bool linkup); 734 735 static int team_port_add(struct team *team, struct net_device *port_dev) 736 { 737 struct net_device *dev = team->dev; 738 struct team_port *port; 739 char *portname = port_dev->name; 740 int err; 741 742 if (port_dev->flags & IFF_LOOPBACK || 743 port_dev->type != ARPHRD_ETHER) { 744 netdev_err(dev, "Device %s is of an unsupported type\n", 745 portname); 746 return -EINVAL; 747 } 748 749 if (team_port_exists(port_dev)) { 750 netdev_err(dev, "Device %s is already a port " 751 "of a team device\n", portname); 752 return -EBUSY; 753 } 754 755 if (port_dev->flags & IFF_UP) { 756 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", 757 portname); 758 return -EBUSY; 759 } 760 761 port = kzalloc(sizeof(struct team_port), GFP_KERNEL); 762 if (!port) 763 return -ENOMEM; 764 765 port->dev = port_dev; 766 port->team = team; 767 768 port->orig.mtu = port_dev->mtu; 769 err = dev_set_mtu(port_dev, dev->mtu); 770 if (err) { 771 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err); 772 goto err_set_mtu; 773 } 774 775 memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); 776 777 err = team_port_enter(team, port); 778 if (err) { 779 netdev_err(dev, "Device %s failed to enter team mode\n", 780 portname); 781 goto err_port_enter; 782 } 783 784 err = dev_open(port_dev); 785 if (err) { 786 netdev_dbg(dev, "Device %s opening failed\n", 787 portname); 788 goto err_dev_open; 789 } 790 791 err = vlan_vids_add_by_dev(port_dev, dev); 792 if (err) { 793 netdev_err(dev, "Failed to add vlan ids to device %s\n", 794 portname); 795 goto err_vids_add; 796 } 797 798 err = netdev_set_master(port_dev, dev); 799 if (err) { 800 netdev_err(dev, "Device %s failed to set master\n", portname); 801 goto err_set_master; 802 } 803 804 err = netdev_rx_handler_register(port_dev, team_handle_frame, 805 port); 806 if (err) { 807 netdev_err(dev, "Device %s failed to register rx_handler\n", 808 portname); 809 goto err_handler_register; 810 } 811 812 err = team_option_port_add(team, port); 813 if (err) { 814 netdev_err(dev, "Device %s failed to add per-port options\n", 815 portname); 816 goto err_option_port_add; 817 } 818 819 port->index = -1; 820 team_port_enable(team, port); 821 list_add_tail_rcu(&port->list, &team->port_list); 822 team_adjust_ops(team); 823 __team_compute_features(team); 824 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 825 826 netdev_info(dev, "Port device %s added\n", portname); 827 828 return 0; 829 830 err_option_port_add: 831 netdev_rx_handler_unregister(port_dev); 832 833 err_handler_register: 834 netdev_set_master(port_dev, NULL); 835 836 err_set_master: 837 vlan_vids_del_by_dev(port_dev, dev); 838 839 err_vids_add: 840 dev_close(port_dev); 841 842 err_dev_open: 843 team_port_leave(team, port); 844 team_port_set_orig_mac(port); 845 846 err_port_enter: 847 dev_set_mtu(port_dev, port->orig.mtu); 848 849 err_set_mtu: 850 kfree(port); 851 852 return err; 853 } 854 855 static int team_port_del(struct team *team, struct net_device *port_dev) 856 { 857 struct net_device *dev = team->dev; 858 struct team_port *port; 859 char *portname = port_dev->name; 860 861 port = team_port_get_rtnl(port_dev); 862 if (!port || !team_port_find(team, port)) { 863 netdev_err(dev, "Device %s does not act as a port of this team\n", 864 portname); 865 return -ENOENT; 866 } 867 868 port->removed = true; 869 __team_port_change_check(port, false); 870 team_port_disable(team, port); 871 list_del_rcu(&port->list); 872 team_adjust_ops(team); 873 team_option_port_del(team, port); 874 netdev_rx_handler_unregister(port_dev); 875 netdev_set_master(port_dev, NULL); 876 vlan_vids_del_by_dev(port_dev, dev); 877 dev_close(port_dev); 878 team_port_leave(team, port); 879 team_port_set_orig_mac(port); 880 dev_set_mtu(port_dev, port->orig.mtu); 881 synchronize_rcu(); 882 kfree(port); 883 netdev_info(dev, "Port device %s removed\n", portname); 884 __team_compute_features(team); 885 886 return 0; 887 } 888 889 890 /***************** 891 * Net device ops 892 *****************/ 893 894 static const char team_no_mode_kind[] = "*NOMODE*"; 895 896 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) 897 { 898 ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind; 899 return 0; 900 } 901 902 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) 903 { 904 return team_change_mode(team, ctx->data.str_val); 905 } 906 907 static int team_port_en_option_get(struct team *team, 908 struct team_gsetter_ctx *ctx) 909 { 910 ctx->data.bool_val = team_port_enabled(ctx->port); 911 return 0; 912 } 913 914 static int team_port_en_option_set(struct team *team, 915 struct team_gsetter_ctx *ctx) 916 { 917 if (ctx->data.bool_val) 918 team_port_enable(team, ctx->port); 919 else 920 team_port_disable(team, ctx->port); 921 return 0; 922 } 923 924 static int team_user_linkup_option_get(struct team *team, 925 struct team_gsetter_ctx *ctx) 926 { 927 ctx->data.bool_val = ctx->port->user.linkup; 928 return 0; 929 } 930 931 static int team_user_linkup_option_set(struct team *team, 932 struct team_gsetter_ctx *ctx) 933 { 934 ctx->port->user.linkup = ctx->data.bool_val; 935 team_refresh_port_linkup(ctx->port); 936 return 0; 937 } 938 939 static int team_user_linkup_en_option_get(struct team *team, 940 struct team_gsetter_ctx *ctx) 941 { 942 struct team_port *port = ctx->port; 943 944 ctx->data.bool_val = port->user.linkup_enabled; 945 return 0; 946 } 947 948 static int team_user_linkup_en_option_set(struct team *team, 949 struct team_gsetter_ctx *ctx) 950 { 951 struct team_port *port = ctx->port; 952 953 port->user.linkup_enabled = ctx->data.bool_val; 954 team_refresh_port_linkup(ctx->port); 955 return 0; 956 } 957 958 static const struct team_option team_options[] = { 959 { 960 .name = "mode", 961 .type = TEAM_OPTION_TYPE_STRING, 962 .getter = team_mode_option_get, 963 .setter = team_mode_option_set, 964 }, 965 { 966 .name = "enabled", 967 .type = TEAM_OPTION_TYPE_BOOL, 968 .per_port = true, 969 .getter = team_port_en_option_get, 970 .setter = team_port_en_option_set, 971 }, 972 { 973 .name = "user_linkup", 974 .type = TEAM_OPTION_TYPE_BOOL, 975 .per_port = true, 976 .getter = team_user_linkup_option_get, 977 .setter = team_user_linkup_option_set, 978 }, 979 { 980 .name = "user_linkup_enabled", 981 .type = TEAM_OPTION_TYPE_BOOL, 982 .per_port = true, 983 .getter = team_user_linkup_en_option_get, 984 .setter = team_user_linkup_en_option_set, 985 }, 986 }; 987 988 static int team_init(struct net_device *dev) 989 { 990 struct team *team = netdev_priv(dev); 991 int i; 992 int err; 993 994 team->dev = dev; 995 mutex_init(&team->lock); 996 997 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); 998 if (!team->pcpu_stats) 999 return -ENOMEM; 1000 1001 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1002 INIT_HLIST_HEAD(&team->en_port_hlist[i]); 1003 INIT_LIST_HEAD(&team->port_list); 1004 1005 team_adjust_ops(team); 1006 1007 INIT_LIST_HEAD(&team->option_list); 1008 INIT_LIST_HEAD(&team->option_inst_list); 1009 err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); 1010 if (err) 1011 goto err_options_register; 1012 netif_carrier_off(dev); 1013 1014 return 0; 1015 1016 err_options_register: 1017 free_percpu(team->pcpu_stats); 1018 1019 return err; 1020 } 1021 1022 static void team_uninit(struct net_device *dev) 1023 { 1024 struct team *team = netdev_priv(dev); 1025 struct team_port *port; 1026 struct team_port *tmp; 1027 1028 mutex_lock(&team->lock); 1029 list_for_each_entry_safe(port, tmp, &team->port_list, list) 1030 team_port_del(team, port->dev); 1031 1032 __team_change_mode(team, NULL); /* cleanup */ 1033 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); 1034 mutex_unlock(&team->lock); 1035 } 1036 1037 static void team_destructor(struct net_device *dev) 1038 { 1039 struct team *team = netdev_priv(dev); 1040 1041 free_percpu(team->pcpu_stats); 1042 free_netdev(dev); 1043 } 1044 1045 static int team_open(struct net_device *dev) 1046 { 1047 netif_carrier_on(dev); 1048 return 0; 1049 } 1050 1051 static int team_close(struct net_device *dev) 1052 { 1053 netif_carrier_off(dev); 1054 return 0; 1055 } 1056 1057 /* 1058 * note: already called with rcu_read_lock 1059 */ 1060 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) 1061 { 1062 struct team *team = netdev_priv(dev); 1063 bool tx_success = false; 1064 unsigned int len = skb->len; 1065 1066 tx_success = team->ops.transmit(team, skb); 1067 if (tx_success) { 1068 struct team_pcpu_stats *pcpu_stats; 1069 1070 pcpu_stats = this_cpu_ptr(team->pcpu_stats); 1071 u64_stats_update_begin(&pcpu_stats->syncp); 1072 pcpu_stats->tx_packets++; 1073 pcpu_stats->tx_bytes += len; 1074 u64_stats_update_end(&pcpu_stats->syncp); 1075 } else { 1076 this_cpu_inc(team->pcpu_stats->tx_dropped); 1077 } 1078 1079 return NETDEV_TX_OK; 1080 } 1081 1082 static void team_change_rx_flags(struct net_device *dev, int change) 1083 { 1084 struct team *team = netdev_priv(dev); 1085 struct team_port *port; 1086 int inc; 1087 1088 rcu_read_lock(); 1089 list_for_each_entry_rcu(port, &team->port_list, list) { 1090 if (change & IFF_PROMISC) { 1091 inc = dev->flags & IFF_PROMISC ? 1 : -1; 1092 dev_set_promiscuity(port->dev, inc); 1093 } 1094 if (change & IFF_ALLMULTI) { 1095 inc = dev->flags & IFF_ALLMULTI ? 1 : -1; 1096 dev_set_allmulti(port->dev, inc); 1097 } 1098 } 1099 rcu_read_unlock(); 1100 } 1101 1102 static void team_set_rx_mode(struct net_device *dev) 1103 { 1104 struct team *team = netdev_priv(dev); 1105 struct team_port *port; 1106 1107 rcu_read_lock(); 1108 list_for_each_entry_rcu(port, &team->port_list, list) { 1109 dev_uc_sync(port->dev, dev); 1110 dev_mc_sync(port->dev, dev); 1111 } 1112 rcu_read_unlock(); 1113 } 1114 1115 static int team_set_mac_address(struct net_device *dev, void *p) 1116 { 1117 struct team *team = netdev_priv(dev); 1118 struct team_port *port; 1119 struct sockaddr *addr = p; 1120 1121 dev->addr_assign_type &= ~NET_ADDR_RANDOM; 1122 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 1123 rcu_read_lock(); 1124 list_for_each_entry_rcu(port, &team->port_list, list) 1125 if (team->ops.port_change_mac) 1126 team->ops.port_change_mac(team, port); 1127 rcu_read_unlock(); 1128 return 0; 1129 } 1130 1131 static int team_change_mtu(struct net_device *dev, int new_mtu) 1132 { 1133 struct team *team = netdev_priv(dev); 1134 struct team_port *port; 1135 int err; 1136 1137 /* 1138 * Alhough this is reader, it's guarded by team lock. It's not possible 1139 * to traverse list in reverse under rcu_read_lock 1140 */ 1141 mutex_lock(&team->lock); 1142 list_for_each_entry(port, &team->port_list, list) { 1143 err = dev_set_mtu(port->dev, new_mtu); 1144 if (err) { 1145 netdev_err(dev, "Device %s failed to change mtu", 1146 port->dev->name); 1147 goto unwind; 1148 } 1149 } 1150 mutex_unlock(&team->lock); 1151 1152 dev->mtu = new_mtu; 1153 1154 return 0; 1155 1156 unwind: 1157 list_for_each_entry_continue_reverse(port, &team->port_list, list) 1158 dev_set_mtu(port->dev, dev->mtu); 1159 mutex_unlock(&team->lock); 1160 1161 return err; 1162 } 1163 1164 static struct rtnl_link_stats64 * 1165 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1166 { 1167 struct team *team = netdev_priv(dev); 1168 struct team_pcpu_stats *p; 1169 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; 1170 u32 rx_dropped = 0, tx_dropped = 0; 1171 unsigned int start; 1172 int i; 1173 1174 for_each_possible_cpu(i) { 1175 p = per_cpu_ptr(team->pcpu_stats, i); 1176 do { 1177 start = u64_stats_fetch_begin_bh(&p->syncp); 1178 rx_packets = p->rx_packets; 1179 rx_bytes = p->rx_bytes; 1180 rx_multicast = p->rx_multicast; 1181 tx_packets = p->tx_packets; 1182 tx_bytes = p->tx_bytes; 1183 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 1184 1185 stats->rx_packets += rx_packets; 1186 stats->rx_bytes += rx_bytes; 1187 stats->multicast += rx_multicast; 1188 stats->tx_packets += tx_packets; 1189 stats->tx_bytes += tx_bytes; 1190 /* 1191 * rx_dropped & tx_dropped are u32, updated 1192 * without syncp protection. 1193 */ 1194 rx_dropped += p->rx_dropped; 1195 tx_dropped += p->tx_dropped; 1196 } 1197 stats->rx_dropped = rx_dropped; 1198 stats->tx_dropped = tx_dropped; 1199 return stats; 1200 } 1201 1202 static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) 1203 { 1204 struct team *team = netdev_priv(dev); 1205 struct team_port *port; 1206 int err; 1207 1208 /* 1209 * Alhough this is reader, it's guarded by team lock. It's not possible 1210 * to traverse list in reverse under rcu_read_lock 1211 */ 1212 mutex_lock(&team->lock); 1213 list_for_each_entry(port, &team->port_list, list) { 1214 err = vlan_vid_add(port->dev, vid); 1215 if (err) 1216 goto unwind; 1217 } 1218 mutex_unlock(&team->lock); 1219 1220 return 0; 1221 1222 unwind: 1223 list_for_each_entry_continue_reverse(port, &team->port_list, list) 1224 vlan_vid_del(port->dev, vid); 1225 mutex_unlock(&team->lock); 1226 1227 return err; 1228 } 1229 1230 static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) 1231 { 1232 struct team *team = netdev_priv(dev); 1233 struct team_port *port; 1234 1235 rcu_read_lock(); 1236 list_for_each_entry_rcu(port, &team->port_list, list) 1237 vlan_vid_del(port->dev, vid); 1238 rcu_read_unlock(); 1239 1240 return 0; 1241 } 1242 1243 static int team_add_slave(struct net_device *dev, struct net_device *port_dev) 1244 { 1245 struct team *team = netdev_priv(dev); 1246 int err; 1247 1248 mutex_lock(&team->lock); 1249 err = team_port_add(team, port_dev); 1250 mutex_unlock(&team->lock); 1251 return err; 1252 } 1253 1254 static int team_del_slave(struct net_device *dev, struct net_device *port_dev) 1255 { 1256 struct team *team = netdev_priv(dev); 1257 int err; 1258 1259 mutex_lock(&team->lock); 1260 err = team_port_del(team, port_dev); 1261 mutex_unlock(&team->lock); 1262 return err; 1263 } 1264 1265 static netdev_features_t team_fix_features(struct net_device *dev, 1266 netdev_features_t features) 1267 { 1268 struct team_port *port; 1269 struct team *team = netdev_priv(dev); 1270 netdev_features_t mask; 1271 1272 mask = features; 1273 features &= ~NETIF_F_ONE_FOR_ALL; 1274 features |= NETIF_F_ALL_FOR_ALL; 1275 1276 rcu_read_lock(); 1277 list_for_each_entry_rcu(port, &team->port_list, list) { 1278 features = netdev_increment_features(features, 1279 port->dev->features, 1280 mask); 1281 } 1282 rcu_read_unlock(); 1283 return features; 1284 } 1285 1286 static const struct net_device_ops team_netdev_ops = { 1287 .ndo_init = team_init, 1288 .ndo_uninit = team_uninit, 1289 .ndo_open = team_open, 1290 .ndo_stop = team_close, 1291 .ndo_start_xmit = team_xmit, 1292 .ndo_change_rx_flags = team_change_rx_flags, 1293 .ndo_set_rx_mode = team_set_rx_mode, 1294 .ndo_set_mac_address = team_set_mac_address, 1295 .ndo_change_mtu = team_change_mtu, 1296 .ndo_get_stats64 = team_get_stats64, 1297 .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid, 1298 .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid, 1299 .ndo_add_slave = team_add_slave, 1300 .ndo_del_slave = team_del_slave, 1301 .ndo_fix_features = team_fix_features, 1302 }; 1303 1304 1305 /*********************** 1306 * rt netlink interface 1307 ***********************/ 1308 1309 static void team_setup(struct net_device *dev) 1310 { 1311 ether_setup(dev); 1312 1313 dev->netdev_ops = &team_netdev_ops; 1314 dev->destructor = team_destructor; 1315 dev->tx_queue_len = 0; 1316 dev->flags |= IFF_MULTICAST; 1317 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 1318 1319 /* 1320 * Indicate we support unicast address filtering. That way core won't 1321 * bring us to promisc mode in case a unicast addr is added. 1322 * Let this up to underlay drivers. 1323 */ 1324 dev->priv_flags |= IFF_UNICAST_FLT; 1325 1326 dev->features |= NETIF_F_LLTX; 1327 dev->features |= NETIF_F_GRO; 1328 dev->hw_features = NETIF_F_HW_VLAN_TX | 1329 NETIF_F_HW_VLAN_RX | 1330 NETIF_F_HW_VLAN_FILTER; 1331 1332 dev->features |= dev->hw_features; 1333 } 1334 1335 static int team_newlink(struct net *src_net, struct net_device *dev, 1336 struct nlattr *tb[], struct nlattr *data[]) 1337 { 1338 int err; 1339 1340 if (tb[IFLA_ADDRESS] == NULL) 1341 eth_hw_addr_random(dev); 1342 1343 err = register_netdevice(dev); 1344 if (err) 1345 return err; 1346 1347 return 0; 1348 } 1349 1350 static int team_validate(struct nlattr *tb[], struct nlattr *data[]) 1351 { 1352 if (tb[IFLA_ADDRESS]) { 1353 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) 1354 return -EINVAL; 1355 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) 1356 return -EADDRNOTAVAIL; 1357 } 1358 return 0; 1359 } 1360 1361 static struct rtnl_link_ops team_link_ops __read_mostly = { 1362 .kind = DRV_NAME, 1363 .priv_size = sizeof(struct team), 1364 .setup = team_setup, 1365 .newlink = team_newlink, 1366 .validate = team_validate, 1367 }; 1368 1369 1370 /*********************************** 1371 * Generic netlink custom interface 1372 ***********************************/ 1373 1374 static struct genl_family team_nl_family = { 1375 .id = GENL_ID_GENERATE, 1376 .name = TEAM_GENL_NAME, 1377 .version = TEAM_GENL_VERSION, 1378 .maxattr = TEAM_ATTR_MAX, 1379 .netnsok = true, 1380 }; 1381 1382 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = { 1383 [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, }, 1384 [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 }, 1385 [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED }, 1386 [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED }, 1387 }; 1388 1389 static const struct nla_policy 1390 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { 1391 [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, }, 1392 [TEAM_ATTR_OPTION_NAME] = { 1393 .type = NLA_STRING, 1394 .len = TEAM_STRING_MAX_LEN, 1395 }, 1396 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, 1397 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, 1398 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY }, 1399 }; 1400 1401 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) 1402 { 1403 struct sk_buff *msg; 1404 void *hdr; 1405 int err; 1406 1407 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1408 if (!msg) 1409 return -ENOMEM; 1410 1411 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 1412 &team_nl_family, 0, TEAM_CMD_NOOP); 1413 if (IS_ERR(hdr)) { 1414 err = PTR_ERR(hdr); 1415 goto err_msg_put; 1416 } 1417 1418 genlmsg_end(msg, hdr); 1419 1420 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); 1421 1422 err_msg_put: 1423 nlmsg_free(msg); 1424 1425 return err; 1426 } 1427 1428 /* 1429 * Netlink cmd functions should be locked by following two functions. 1430 * Since dev gets held here, that ensures dev won't disappear in between. 1431 */ 1432 static struct team *team_nl_team_get(struct genl_info *info) 1433 { 1434 struct net *net = genl_info_net(info); 1435 int ifindex; 1436 struct net_device *dev; 1437 struct team *team; 1438 1439 if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX]) 1440 return NULL; 1441 1442 ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]); 1443 dev = dev_get_by_index(net, ifindex); 1444 if (!dev || dev->netdev_ops != &team_netdev_ops) { 1445 if (dev) 1446 dev_put(dev); 1447 return NULL; 1448 } 1449 1450 team = netdev_priv(dev); 1451 mutex_lock(&team->lock); 1452 return team; 1453 } 1454 1455 static void team_nl_team_put(struct team *team) 1456 { 1457 mutex_unlock(&team->lock); 1458 dev_put(team->dev); 1459 } 1460 1461 static int team_nl_send_generic(struct genl_info *info, struct team *team, 1462 int (*fill_func)(struct sk_buff *skb, 1463 struct genl_info *info, 1464 int flags, struct team *team)) 1465 { 1466 struct sk_buff *skb; 1467 int err; 1468 1469 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1470 if (!skb) 1471 return -ENOMEM; 1472 1473 err = fill_func(skb, info, NLM_F_ACK, team); 1474 if (err < 0) 1475 goto err_fill; 1476 1477 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); 1478 return err; 1479 1480 err_fill: 1481 nlmsg_free(skb); 1482 return err; 1483 } 1484 1485 static int team_nl_fill_options_get(struct sk_buff *skb, 1486 u32 pid, u32 seq, int flags, 1487 struct team *team, bool fillall) 1488 { 1489 struct nlattr *option_list; 1490 void *hdr; 1491 struct team_option_inst *opt_inst; 1492 int err; 1493 1494 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1495 TEAM_CMD_OPTIONS_GET); 1496 if (IS_ERR(hdr)) 1497 return PTR_ERR(hdr); 1498 1499 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 1500 goto nla_put_failure; 1501 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); 1502 if (!option_list) 1503 return -EMSGSIZE; 1504 1505 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 1506 struct nlattr *option_item; 1507 struct team_option *option = opt_inst->option; 1508 struct team_gsetter_ctx ctx; 1509 1510 /* Include only changed options if fill all mode is not on */ 1511 if (!fillall && !opt_inst->changed) 1512 continue; 1513 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1514 if (!option_item) 1515 goto nla_put_failure; 1516 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name)) 1517 goto nla_put_failure; 1518 if (opt_inst->changed) { 1519 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED)) 1520 goto nla_put_failure; 1521 opt_inst->changed = false; 1522 } 1523 if (opt_inst->removed && 1524 nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED)) 1525 goto nla_put_failure; 1526 if (opt_inst->port && 1527 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX, 1528 opt_inst->port->dev->ifindex)) 1529 goto nla_put_failure; 1530 ctx.port = opt_inst->port; 1531 switch (option->type) { 1532 case TEAM_OPTION_TYPE_U32: 1533 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32)) 1534 goto nla_put_failure; 1535 err = team_option_get(team, opt_inst, &ctx); 1536 if (err) 1537 goto errout; 1538 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, 1539 ctx.data.u32_val)) 1540 goto nla_put_failure; 1541 break; 1542 case TEAM_OPTION_TYPE_STRING: 1543 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING)) 1544 goto nla_put_failure; 1545 err = team_option_get(team, opt_inst, &ctx); 1546 if (err) 1547 goto errout; 1548 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA, 1549 ctx.data.str_val)) 1550 goto nla_put_failure; 1551 break; 1552 case TEAM_OPTION_TYPE_BINARY: 1553 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY)) 1554 goto nla_put_failure; 1555 err = team_option_get(team, opt_inst, &ctx); 1556 if (err) 1557 goto errout; 1558 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, 1559 ctx.data.bin_val.len, ctx.data.bin_val.ptr)) 1560 goto nla_put_failure; 1561 break; 1562 case TEAM_OPTION_TYPE_BOOL: 1563 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG)) 1564 goto nla_put_failure; 1565 err = team_option_get(team, opt_inst, &ctx); 1566 if (err) 1567 goto errout; 1568 if (ctx.data.bool_val && 1569 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) 1570 goto nla_put_failure; 1571 break; 1572 default: 1573 BUG(); 1574 } 1575 nla_nest_end(skb, option_item); 1576 } 1577 1578 nla_nest_end(skb, option_list); 1579 return genlmsg_end(skb, hdr); 1580 1581 nla_put_failure: 1582 err = -EMSGSIZE; 1583 errout: 1584 genlmsg_cancel(skb, hdr); 1585 return err; 1586 } 1587 1588 static int team_nl_fill_options_get_all(struct sk_buff *skb, 1589 struct genl_info *info, int flags, 1590 struct team *team) 1591 { 1592 return team_nl_fill_options_get(skb, info->snd_pid, 1593 info->snd_seq, NLM_F_ACK, 1594 team, true); 1595 } 1596 1597 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) 1598 { 1599 struct team *team; 1600 int err; 1601 1602 team = team_nl_team_get(info); 1603 if (!team) 1604 return -EINVAL; 1605 1606 err = team_nl_send_generic(info, team, team_nl_fill_options_get_all); 1607 1608 team_nl_team_put(team); 1609 1610 return err; 1611 } 1612 1613 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) 1614 { 1615 struct team *team; 1616 int err = 0; 1617 int i; 1618 struct nlattr *nl_option; 1619 1620 team = team_nl_team_get(info); 1621 if (!team) 1622 return -EINVAL; 1623 1624 err = -EINVAL; 1625 if (!info->attrs[TEAM_ATTR_LIST_OPTION]) { 1626 err = -EINVAL; 1627 goto team_put; 1628 } 1629 1630 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { 1631 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; 1632 struct nlattr *attr_port_ifindex; 1633 struct nlattr *attr_data; 1634 enum team_option_type opt_type; 1635 int opt_port_ifindex = 0; /* != 0 for per-port options */ 1636 struct team_option_inst *opt_inst; 1637 char *opt_name; 1638 bool opt_found = false; 1639 1640 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) { 1641 err = -EINVAL; 1642 goto team_put; 1643 } 1644 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX, 1645 nl_option, team_nl_option_policy); 1646 if (err) 1647 goto team_put; 1648 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] || 1649 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) { 1650 err = -EINVAL; 1651 goto team_put; 1652 } 1653 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) { 1654 case NLA_U32: 1655 opt_type = TEAM_OPTION_TYPE_U32; 1656 break; 1657 case NLA_STRING: 1658 opt_type = TEAM_OPTION_TYPE_STRING; 1659 break; 1660 case NLA_BINARY: 1661 opt_type = TEAM_OPTION_TYPE_BINARY; 1662 break; 1663 case NLA_FLAG: 1664 opt_type = TEAM_OPTION_TYPE_BOOL; 1665 break; 1666 default: 1667 goto team_put; 1668 } 1669 1670 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA]; 1671 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) { 1672 err = -EINVAL; 1673 goto team_put; 1674 } 1675 1676 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]); 1677 attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX]; 1678 if (attr_port_ifindex) 1679 opt_port_ifindex = nla_get_u32(attr_port_ifindex); 1680 1681 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 1682 struct team_option *option = opt_inst->option; 1683 struct team_gsetter_ctx ctx; 1684 int tmp_ifindex; 1685 1686 tmp_ifindex = opt_inst->port ? 1687 opt_inst->port->dev->ifindex : 0; 1688 if (option->type != opt_type || 1689 strcmp(option->name, opt_name) || 1690 tmp_ifindex != opt_port_ifindex) 1691 continue; 1692 opt_found = true; 1693 ctx.port = opt_inst->port; 1694 switch (opt_type) { 1695 case TEAM_OPTION_TYPE_U32: 1696 ctx.data.u32_val = nla_get_u32(attr_data); 1697 break; 1698 case TEAM_OPTION_TYPE_STRING: 1699 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) { 1700 err = -EINVAL; 1701 goto team_put; 1702 } 1703 ctx.data.str_val = nla_data(attr_data); 1704 break; 1705 case TEAM_OPTION_TYPE_BINARY: 1706 ctx.data.bin_val.len = nla_len(attr_data); 1707 ctx.data.bin_val.ptr = nla_data(attr_data); 1708 break; 1709 case TEAM_OPTION_TYPE_BOOL: 1710 ctx.data.bool_val = attr_data ? true : false; 1711 break; 1712 default: 1713 BUG(); 1714 } 1715 err = team_option_set(team, opt_inst, &ctx); 1716 if (err) 1717 goto team_put; 1718 } 1719 if (!opt_found) { 1720 err = -ENOENT; 1721 goto team_put; 1722 } 1723 } 1724 1725 team_put: 1726 team_nl_team_put(team); 1727 1728 return err; 1729 } 1730 1731 static int team_nl_fill_port_list_get(struct sk_buff *skb, 1732 u32 pid, u32 seq, int flags, 1733 struct team *team, 1734 bool fillall) 1735 { 1736 struct nlattr *port_list; 1737 void *hdr; 1738 struct team_port *port; 1739 1740 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1741 TEAM_CMD_PORT_LIST_GET); 1742 if (IS_ERR(hdr)) 1743 return PTR_ERR(hdr); 1744 1745 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 1746 goto nla_put_failure; 1747 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); 1748 if (!port_list) 1749 return -EMSGSIZE; 1750 1751 list_for_each_entry(port, &team->port_list, list) { 1752 struct nlattr *port_item; 1753 1754 /* Include only changed ports if fill all mode is not on */ 1755 if (!fillall && !port->changed) 1756 continue; 1757 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); 1758 if (!port_item) 1759 goto nla_put_failure; 1760 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) 1761 goto nla_put_failure; 1762 if (port->changed) { 1763 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED)) 1764 goto nla_put_failure; 1765 port->changed = false; 1766 } 1767 if ((port->removed && 1768 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) || 1769 (port->state.linkup && 1770 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) || 1771 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) || 1772 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex)) 1773 goto nla_put_failure; 1774 nla_nest_end(skb, port_item); 1775 } 1776 1777 nla_nest_end(skb, port_list); 1778 return genlmsg_end(skb, hdr); 1779 1780 nla_put_failure: 1781 genlmsg_cancel(skb, hdr); 1782 return -EMSGSIZE; 1783 } 1784 1785 static int team_nl_fill_port_list_get_all(struct sk_buff *skb, 1786 struct genl_info *info, int flags, 1787 struct team *team) 1788 { 1789 return team_nl_fill_port_list_get(skb, info->snd_pid, 1790 info->snd_seq, NLM_F_ACK, 1791 team, true); 1792 } 1793 1794 static int team_nl_cmd_port_list_get(struct sk_buff *skb, 1795 struct genl_info *info) 1796 { 1797 struct team *team; 1798 int err; 1799 1800 team = team_nl_team_get(info); 1801 if (!team) 1802 return -EINVAL; 1803 1804 err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all); 1805 1806 team_nl_team_put(team); 1807 1808 return err; 1809 } 1810 1811 static struct genl_ops team_nl_ops[] = { 1812 { 1813 .cmd = TEAM_CMD_NOOP, 1814 .doit = team_nl_cmd_noop, 1815 .policy = team_nl_policy, 1816 }, 1817 { 1818 .cmd = TEAM_CMD_OPTIONS_SET, 1819 .doit = team_nl_cmd_options_set, 1820 .policy = team_nl_policy, 1821 .flags = GENL_ADMIN_PERM, 1822 }, 1823 { 1824 .cmd = TEAM_CMD_OPTIONS_GET, 1825 .doit = team_nl_cmd_options_get, 1826 .policy = team_nl_policy, 1827 .flags = GENL_ADMIN_PERM, 1828 }, 1829 { 1830 .cmd = TEAM_CMD_PORT_LIST_GET, 1831 .doit = team_nl_cmd_port_list_get, 1832 .policy = team_nl_policy, 1833 .flags = GENL_ADMIN_PERM, 1834 }, 1835 }; 1836 1837 static struct genl_multicast_group team_change_event_mcgrp = { 1838 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, 1839 }; 1840 1841 static int team_nl_send_event_options_get(struct team *team) 1842 { 1843 struct sk_buff *skb; 1844 int err; 1845 struct net *net = dev_net(team->dev); 1846 1847 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1848 if (!skb) 1849 return -ENOMEM; 1850 1851 err = team_nl_fill_options_get(skb, 0, 0, 0, team, false); 1852 if (err < 0) 1853 goto err_fill; 1854 1855 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, 1856 GFP_KERNEL); 1857 return err; 1858 1859 err_fill: 1860 nlmsg_free(skb); 1861 return err; 1862 } 1863 1864 static int team_nl_send_event_port_list_get(struct team *team) 1865 { 1866 struct sk_buff *skb; 1867 int err; 1868 struct net *net = dev_net(team->dev); 1869 1870 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1871 if (!skb) 1872 return -ENOMEM; 1873 1874 err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false); 1875 if (err < 0) 1876 goto err_fill; 1877 1878 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, 1879 GFP_KERNEL); 1880 return err; 1881 1882 err_fill: 1883 nlmsg_free(skb); 1884 return err; 1885 } 1886 1887 static int team_nl_init(void) 1888 { 1889 int err; 1890 1891 err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, 1892 ARRAY_SIZE(team_nl_ops)); 1893 if (err) 1894 return err; 1895 1896 err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); 1897 if (err) 1898 goto err_change_event_grp_reg; 1899 1900 return 0; 1901 1902 err_change_event_grp_reg: 1903 genl_unregister_family(&team_nl_family); 1904 1905 return err; 1906 } 1907 1908 static void team_nl_fini(void) 1909 { 1910 genl_unregister_family(&team_nl_family); 1911 } 1912 1913 1914 /****************** 1915 * Change checkers 1916 ******************/ 1917 1918 static void __team_options_change_check(struct team *team) 1919 { 1920 int err; 1921 1922 err = team_nl_send_event_options_get(team); 1923 if (err) 1924 netdev_warn(team->dev, "Failed to send options change via netlink\n"); 1925 } 1926 1927 /* rtnl lock is held */ 1928 static void __team_port_change_check(struct team_port *port, bool linkup) 1929 { 1930 int err; 1931 1932 if (!port->removed && port->state.linkup == linkup) 1933 return; 1934 1935 port->changed = true; 1936 port->state.linkup = linkup; 1937 team_refresh_port_linkup(port); 1938 if (linkup) { 1939 struct ethtool_cmd ecmd; 1940 1941 err = __ethtool_get_settings(port->dev, &ecmd); 1942 if (!err) { 1943 port->state.speed = ethtool_cmd_speed(&ecmd); 1944 port->state.duplex = ecmd.duplex; 1945 goto send_event; 1946 } 1947 } 1948 port->state.speed = 0; 1949 port->state.duplex = 0; 1950 1951 send_event: 1952 err = team_nl_send_event_port_list_get(port->team); 1953 if (err) 1954 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", 1955 port->dev->name); 1956 1957 } 1958 1959 static void team_port_change_check(struct team_port *port, bool linkup) 1960 { 1961 struct team *team = port->team; 1962 1963 mutex_lock(&team->lock); 1964 __team_port_change_check(port, linkup); 1965 mutex_unlock(&team->lock); 1966 } 1967 1968 /************************************ 1969 * Net device notifier event handler 1970 ************************************/ 1971 1972 static int team_device_event(struct notifier_block *unused, 1973 unsigned long event, void *ptr) 1974 { 1975 struct net_device *dev = (struct net_device *) ptr; 1976 struct team_port *port; 1977 1978 port = team_port_get_rtnl(dev); 1979 if (!port) 1980 return NOTIFY_DONE; 1981 1982 switch (event) { 1983 case NETDEV_UP: 1984 if (netif_carrier_ok(dev)) 1985 team_port_change_check(port, true); 1986 case NETDEV_DOWN: 1987 team_port_change_check(port, false); 1988 case NETDEV_CHANGE: 1989 if (netif_running(port->dev)) 1990 team_port_change_check(port, 1991 !!netif_carrier_ok(port->dev)); 1992 break; 1993 case NETDEV_UNREGISTER: 1994 team_del_slave(port->team->dev, dev); 1995 break; 1996 case NETDEV_FEAT_CHANGE: 1997 team_compute_features(port->team); 1998 break; 1999 case NETDEV_CHANGEMTU: 2000 /* Forbid to change mtu of underlaying device */ 2001 return NOTIFY_BAD; 2002 case NETDEV_PRE_TYPE_CHANGE: 2003 /* Forbid to change type of underlaying device */ 2004 return NOTIFY_BAD; 2005 } 2006 return NOTIFY_DONE; 2007 } 2008 2009 static struct notifier_block team_notifier_block __read_mostly = { 2010 .notifier_call = team_device_event, 2011 }; 2012 2013 2014 /*********************** 2015 * Module init and exit 2016 ***********************/ 2017 2018 static int __init team_module_init(void) 2019 { 2020 int err; 2021 2022 register_netdevice_notifier(&team_notifier_block); 2023 2024 err = rtnl_link_register(&team_link_ops); 2025 if (err) 2026 goto err_rtnl_reg; 2027 2028 err = team_nl_init(); 2029 if (err) 2030 goto err_nl_init; 2031 2032 return 0; 2033 2034 err_nl_init: 2035 rtnl_link_unregister(&team_link_ops); 2036 2037 err_rtnl_reg: 2038 unregister_netdevice_notifier(&team_notifier_block); 2039 2040 return err; 2041 } 2042 2043 static void __exit team_module_exit(void) 2044 { 2045 team_nl_fini(); 2046 rtnl_link_unregister(&team_link_ops); 2047 unregister_netdevice_notifier(&team_notifier_block); 2048 } 2049 2050 module_init(team_module_init); 2051 module_exit(team_module_exit); 2052 2053 MODULE_LICENSE("GPL v2"); 2054 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); 2055 MODULE_DESCRIPTION("Ethernet team device driver"); 2056 MODULE_ALIAS_RTNL_LINK(DRV_NAME); 2057