/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	struct team_port *port = rcu_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
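/*
 * Example (an illustrative sketch, not a call site in this driver): a
 * hot-path caller would use the RCU variant under rcu_read_lock(),
 * while control paths that already hold rtnl use team_port_get_rtnl():
 *
 *	rcu_read_lock();
 *	port = team_port_get_rcu(skb->dev);
 *	if (port)
 *		netdev_dbg(port->team->dev, "found port\n");
 *	rcu_read_unlock();
 */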
/*
 * team_port_add() verifies that the MAC address of an open port device
 * can be changed, so this function may be called without checking the
 * return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);

static void team_refresh_port_linkup(struct team_port *port)
{
	port->linkup = port->user.linkup_enabled ? port->user.linkup :
						   port->state.linkup;
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
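/*
 * Note on instance fan-out: a plain option yields a single instance, a
 * per-port option yields one instance per port, and an array option
 * multiplies that by option->array_size; __team_option_inst_add()
 * above creates the instances for one (option, port) pair.
 */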
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	struct team_port *port;
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}

	list_for_each_entry(port, &team->port_list, list) {
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--) {
		__team_option_inst_del_option(team, dst_opts[i]);
		list_del(&dst_opts[i]->list);
	}

	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
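/*
 * Example (an illustrative sketch only; "myopt" and its getter are
 * hypothetical): a mode module could register a per-port u32 option
 * with the API above like this:
 *
 *	static int myopt_get(struct team *team, struct team_gsetter_ctx *ctx)
 *	{
 *		ctx->data.u32_val = 0;
 *		return 0;
 *	}
 *
 *	static const struct team_option myopt = {
 *		.name		= "myopt",
 *		.type		= TEAM_OPTION_TYPE_U32,
 *		.per_port	= true,
 *		.getter		= myopt_get,
 *	};
 *
 *	err = team_options_register(team, &myopt, 1);
 */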
static int team_option_port_add(struct team *team, struct team_port *port)
{
	int err;

	err = __team_option_inst_add_port(team, port);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}

static void team_option_port_del(struct team *team, struct team_port *port)
{
	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
}

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	int err;

	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	err = opt_inst->option->setter(team, ctx);
	if (err)
		return err;

	opt_inst->changed = true;
	__team_options_change_check(team);
	return err;
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
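/*
 * Example (a minimal sketch of a mode module; the "dummy" name is
 * hypothetical): modules register a struct team_mode with the API
 * above and rely on team_mode_get() below to autoload them via the
 * "team-mode-<kind>" module alias.
 *
 *	static bool dummy_transmit(struct team *team, struct sk_buff *skb)
 *	{
 *		dev_kfree_skb_any(skb);
 *		return false;
 *	}
 *
 *	static const struct team_mode_ops dummy_mode_ops = {
 *		.transmit	= dummy_transmit,
 *	};
 *
 *	static const struct team_mode dummy_mode = {
 *		.kind	= "dummy",
 *		.owner	= THIS_MODULE,
 *		.ops	= &dummy_mode_ops,
 *	};
 *
 *	MODULE_ALIAS("team-mode-dummy");
 *
 * with module_init()/module_exit() calling team_mode_register() and
 * team_mode_unregister() respectively.
 */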
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that no port is present at the time of
 * mode change. Therefore no packets are in flight and there is no need
 * to set the mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/************************
 * Rx path frame handler
 ************************/

static bool team_port_enabled(struct team_port *port);
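/*
 * rx_handler return-value contract (as defined by the core):
 * RX_HANDLER_CONSUMED means the skb was freed or queued here,
 * RX_HANDLER_EXACT limits delivery to exact-match protocol handlers on
 * the port, and RX_HANDLER_ANOTHER asks the core to reprocess the skb
 * after skb->dev has been switched to the team device.
 */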
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

static bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

/*
 * Enable/disable port by adding it to the enabled port hashlist and
 * setting port->index (might be racy, so a reader could see an
 * incorrect index for an in-flight packet, but that is not a problem).
 * Writes are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	int rm_index = port->index;

	if (!team_port_enabled(port))
		return;
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, rm_index);
	team->en_port_count--;
	port->index = -1;
}
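/*
 * Example of the compaction above: with enabled ports at indexes
 * 0,1,2,3, disabling the port at index 1 shifts the ports at 2 and 3
 * down to 1 and 2, keeping indexes dense in 0..en_port_count-1 so
 * modes can pick a port by a simple modulo.
 */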
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_option_port_add(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	port->index = -1;
	team_port_enable(team, port);
	list_add_tail_rcu(&port->list, &team->port_list);
	team_adjust_ops(team);
	__team_compute_features(team);
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
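/*
 * Note: the error labels in team_port_add() unwind in exact reverse
 * order of the setup steps, so a failure at any point leaves the port
 * device in its original state.
 */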
static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	port->removed = true;
	__team_port_change_check(port, false);
	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_adjust_ops(team);
	team_option_port_del(team, port);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
};
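/*
 * These core options, like all others, are read and written over the
 * generic netlink interface below; in practice that is typically done
 * by the userspace libteam tools (e.g. teamd) rather than by hand.
 */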
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);
	err = team_options_register(team, team_options,
				    ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	return 0;

err_options_register:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}
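/*
 * The team device is marked NETIF_F_LLTX (see team_setup()), so the
 * transmit path below runs without the device tx lock; counters are
 * therefore kept in per-cpu stats protected by u64_stats_sync, with
 * team_get_stats64() as the reader side.
 */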
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team->lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	return stats;
}
static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team->lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	rcu_read_unlock();

	return 0;
}

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
};


/***********************
 * rt netlink interface
 ***********************/
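/*
 * Team devices are normally created and destroyed from userspace via
 * rtnetlink, e.g. "ip link add name team0 type team", which ends up in
 * team_newlink() below.
 */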
static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added. Leave
	 * this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct team),
	.setup		= team_setup,
	.newlink	= team_newlink,
	.validate	= team_validate,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};
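/*
 * Message layout used by the options get/set commands below (sketch):
 *
 *	TEAM_ATTR_TEAM_IFINDEX (u32)
 *	TEAM_ATTR_LIST_OPTION (nested)
 *		TEAM_ATTR_ITEM_OPTION (nested)
 *			TEAM_ATTR_OPTION_NAME (string)
 *			TEAM_ATTR_OPTION_TYPE (u8, one of NLA_*)
 *			TEAM_ATTR_OPTION_DATA (type-dependent)
 *			TEAM_ATTR_OPTION_PORT_IFINDEX (u32, per-port only)
 *			TEAM_ATTR_OPTION_ARRAY_INDEX (u32, arrays only)
 */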
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (IS_ERR(hdr)) {
		err = PTR_ERR(hdr);
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by the following pair of
 * functions. Since the netdevice gets held here, it is guaranteed not
 * to disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}
static int team_nl_fill_options_get(struct sk_buff *skb,
				    u32 pid, u32 seq, int flags,
				    struct team *team, bool fillall)
{
	struct nlattr *option_list;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_OPTIONS_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		struct nlattr *option_item;
		struct team_option *option = opt_inst->option;
		struct team_option_inst_info *opt_inst_info;
		struct team_gsetter_ctx ctx;

		/* Include only changed options if fill all mode is not on */
		if (!fillall && !opt_inst->changed)
			continue;
		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
		if (!option_item)
			goto nla_put_failure;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
			goto nla_put_failure;
		if (opt_inst->changed) {
			if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
				goto nla_put_failure;
			opt_inst->changed = false;
		}
		if (opt_inst->removed &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
			goto nla_put_failure;

		opt_inst_info = &opt_inst->info;
		if (opt_inst_info->port &&
		    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
				opt_inst_info->port->dev->ifindex))
			goto nla_put_failure;
		if (opt_inst->option->array_size &&
		    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
				opt_inst_info->array_index))
			goto nla_put_failure;
		ctx.info = opt_inst_info;

		switch (option->type) {
		case TEAM_OPTION_TYPE_U32:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
					ctx.data.u32_val))
				goto nla_put_failure;
			break;
		case TEAM_OPTION_TYPE_STRING:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
					   ctx.data.str_val))
				goto nla_put_failure;
			break;
		case TEAM_OPTION_TYPE_BINARY:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
				    ctx.data.bin_val.len, ctx.data.bin_val.ptr))
				goto nla_put_failure;
			break;
		case TEAM_OPTION_TYPE_BOOL:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (ctx.data.bool_val &&
			    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
				goto nla_put_failure;
			break;
		default:
			BUG();
		}
		nla_nest_end(skb, option_item);
	}

	nla_nest_end(skb, option_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	return err;
}

static int team_nl_fill_options_get_all(struct sk_buff *skb,
					struct genl_info *info, int flags,
					struct team *team)
{
	return team_nl_fill_options_get(skb, info->snd_pid,
					info->snd_seq, NLM_F_ACK,
					team, true);
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);

	team_nl_team_put(team);

	return err;
}
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		default:
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

team_put:
	team_nl_team_put(team);

	return err;
}
static int team_nl_fill_port_list_get(struct sk_buff *skb,
				      u32 pid, u32 seq, int flags,
				      struct team *team,
				      bool fillall)
{
	struct nlattr *port_list;
	void *hdr;
	struct team_port *port;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_PORT_LIST_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	list_for_each_entry(port, &team->port_list, list) {
		struct nlattr *port_item;

		/* Include only changed ports if fill all mode is not on */
		if (!fillall && !port->changed)
			continue;
		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			goto nla_put_failure;
		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
			goto nla_put_failure;
		if (port->changed) {
			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
				goto nla_put_failure;
			port->changed = false;
		}
		if ((port->removed &&
		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
		    (port->state.linkup &&
		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
			goto nla_put_failure;
		nla_nest_end(skb, port_item);
	}

	nla_nest_end(skb, port_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
					  struct genl_info *info, int flags,
					  struct team *team)
{
	return team_nl_fill_port_list_get(skb, info->snd_pid,
					  info->snd_seq, NLM_F_ACK,
					  team, true);
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);

	team_nl_team_put(team);

	return err;
}

static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};

static int team_nl_send_event_options_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_send_event_port_list_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family,
				     &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/
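/*
 * Option writes and port link changes funnel through the checkers
 * below, which push TEAM_CMD_OPTIONS_GET / TEAM_CMD_PORT_LIST_GET
 * multicast events carrying only the changed items, so userspace
 * listeners do not need to poll.
 */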
static void __team_options_change_check(struct team *team)
{
	int err;

	err = team_nl_send_event_options_get(team);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink\n");
}

/* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (!port->removed && port->state.linkup == linkup)
		return;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ethtool_cmd_speed(&ecmd);
			port->state.duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port->team);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}


/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid changing the mtu of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);