// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return netif_is_team_port(dev) ? port : NULL;
}

/*
 * Since the ability to change the device address of an open port device is
 * verified in team_port_add, this function can be called without checking
 * the return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}
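/*
 * Note: a port's effective link state is either what the device reports
 * (port->state.linkup) or what the user forced (port->user.linkup),
 * depending on the per-port "user_linkup_enabled" option handled below.
 */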
static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}
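/*
 * Note: every registered option expands to one instance per array index,
 * and per-port options additionally to one such set per port. The
 * changed/removed flags on an instance drive the netlink change
 * notifications sent to userspace.
 */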
static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}
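/*
 * Example (illustrative sketch only, function names are hypothetical):
 * a mode module typically registers its own options from its init op:
 *
 *	static const struct team_option ab_options[] = {
 *		{
 *			.name = "activeport",
 *			.type = TEAM_OPTION_TYPE_U32,
 *			.getter = ab_active_port_get,
 *			.setter = ab_active_port_set,
 *		},
 *	};
 *
 *	return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 */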
void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	module_put(THIS_MODULE);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}
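/*
 * Example (illustrative sketch): a mode module registers itself roughly
 * like this from its module init/exit hooks (names are abbreviated):
 *
 *	static const struct team_mode rr_mode = {
 *		.kind = "roundrobin",
 *		.owner = THIS_MODULE,
 *		.priv_size = sizeof(struct rr_priv),
 *		.ops = &rr_mode_ops,
 *	};
 *
 *	module init: return team_mode_register(&rr_mode);
 *	module exit: team_mode_unregister(&rr_mode);
 */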
static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}
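/*
 * Note: team_notify_peers_work() only tries rtnl_trylock(). If RTNL is
 * contended, it reschedules itself with zero delay instead of blocking
 * the workqueue; count_pending is atomic, so notifications requested
 * meanwhile are not lost.
 */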
static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}
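/*
 * Note: team->qom_lists holds one list per tx queue id >= 1. Each list is
 * kept sorted by team_queue_override_port_has_gt_prio_than() (ties broken
 * by the lower port index), so team_queue_override_transmit() tries the
 * most preferred port bound to a queue id first.
 */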
static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (might be racy, so a reader could see an incorrect ifindex
 * while processing an in-flight packet, but that is not a problem). Write
 * is guarded by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			   NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}
	rcu_read_unlock();

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_HW_VLAN_CTAG_TX |
				     NETIF_F_HW_VLAN_STAG_TX;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	__team_compute_features(team);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}
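/*
 * Note: team_port_enter() takes a reference on the team device which is
 * dropped in team_port_leave() (or on the error path above), so the
 * master cannot go away while a port is attached.
 */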
static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}
	if (netif_is_team_port(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (netdev_has_upper_dev(dev, port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev, extack);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	/* set promiscuity level to new slave */
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			goto err_set_slave_promisc;
	}
	/* set allmulti level to new slave */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1);
			goto err_set_slave_promisc;
		}
	}

	if (dev->flags & IFF_UP) {
		netif_addr_lock_bh(dev);
		dev_uc_sync_multiple(port_dev, dev);
		dev_mc_sync_multiple(port_dev, dev);
		netif_addr_unlock_bh(dev);
	}

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_set_slave_promisc:
	__team_option_inst_del_port(team, port);

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(port_dev, -1);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(port_dev, -1);

	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	if (dev->flags & IFF_UP) {
		dev_uc_unsync(port_dev, dev);
		dev_mc_unsync(port_dev, dev);
	}
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}
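/*
 * The getters/setters below back the team_options[] table further down.
 * They are expected to run under team->lock, taken by the generic netlink
 * request handlers, so they need no extra locking of their own.
 */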
static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}
static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};


static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	lockdep_register_key(&team->team_lock_key);
	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}
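/*
 * Note: each team instance registers its own lockdep key for team->lock,
 * so stacked setups (a team enslaved to another team) do not produce
 * false-positive lockdep reports; see also team_del_slave().
 */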
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
	lockdep_unregister_key(&team->team_lock_key);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list) {
		dev_uc_unsync(port->dev, dev);
		dev_mc_unsync(port->dev, dev);
	}

	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}
static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
		rx_nohandler += p->rx_nohandler;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	stats->rx_nohandler = rx_nohandler;
}
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npinfo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
			  struct netlink_ext_ack *extack)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev, extack);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (err)
		return err;

	if (netif_is_team_master(port_dev)) {
		lockdep_unregister_key(&team->team_lock_key);
		lockdep_register_key(&team->team_lock_key);
		lockdep_set_class(&team->lock, &team->team_lock_key);
	}
	netdev_change_features(dev);

	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}
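/*
 * Note: team_fix_features() starts from the most permissive feature set
 * and lets netdev_increment_features() narrow it against every port, so
 * NETIF_F_ALL_FOR_ALL features stay enabled only if all ports support
 * them.
 */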
static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init = team_init,
	.ndo_uninit = team_uninit,
	.ndo_open = team_open,
	.ndo_stop = team_close,
	.ndo_start_xmit = team_xmit,
	.ndo_select_queue = team_select_queue,
	.ndo_change_rx_flags = team_change_rx_flags,
	.ndo_set_rx_mode = team_set_rx_mode,
	.ndo_set_mac_address = team_set_mac_address,
	.ndo_change_mtu = team_change_mtu,
	.ndo_get_stats64 = team_get_stats64,
	.ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = team_poll_controller,
	.ndo_netpoll_setup = team_netpoll_setup,
	.ndo_netpoll_cleanup = team_netpoll_cleanup,
#endif
	.ndo_add_slave = team_add_slave,
	.ndo_del_slave = team_del_slave,
	.ndo_fix_features = team_fix_features,
	.ndo_change_carrier = team_change_carrier,
	.ndo_features_check = passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static int team_ethtool_get_link_ksettings(struct net_device *dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct team *team = netdev_priv(dev);
	unsigned long speed = 0;
	struct team_port *port;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (team_port_txable(port)) {
			if (port->state.speed != SPEED_UNKNOWN)
				speed += port->state.speed;
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    port->state.duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = port->state.duplex;
		}
	}
	rcu_read_unlock();

	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}
static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= team_ethtool_get_link_ksettings,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops = port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->needed_headroom = port_dev->needed_headroom;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}

static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}
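/*
 * A team's device type follows its first port: team_dev_type_check_change()
 * above first offers the change to NETDEV_PRE_TYPE_CHANGE listeners, which
 * may veto it, then inherits the port's link-layer parameters via
 * team_setup_by_port() and announces NETDEV_POST_TYPE_CHANGE. Once any
 * port is enslaved, a port of a different type is refused with -EBUSY.
 */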
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	dev->features |= dev->hw_features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
	[TEAM_ATTR_OPTION_PORT_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_OPTION_ARRAY_INDEX]	= { .type = NLA_U32 },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}
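/*
 * TEAM_CMD_NOOP just echoes an empty message back to the sender; userspace
 * can use it to check that the team genetlink family is registered and
 * reachable before issuing real commands.
 */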
/*
 * Netlink cmd functions should be locked by the following two functions.
 * Since dev is held here, it is guaranteed that the device won't disappear
 * in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}
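/*
 * team_nl_fill_one_option_get() above emits each option instance as one
 * TEAM_ATTR_ITEM_OPTION nest:
 *
 *	TEAM_ATTR_OPTION_NAME		(string)
 *	TEAM_ATTR_OPTION_PORT_IFINDEX	(u32, per-port options only)
 *	TEAM_ATTR_OPTION_ARRAY_INDEX	(u32, array options only)
 *	TEAM_ATTR_OPTION_TYPE		(u8, NLA_* type of the data)
 *	TEAM_ATTR_OPTION_DATA		(payload; absent for false bools)
 *	TEAM_ATTR_OPTION_REMOVED/CHANGED (flags)
 */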
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);
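/*
 * Setting options: the request carries a TEAM_ATTR_LIST_OPTION nest with
 * one TEAM_ATTR_ITEM_OPTION per option to change. Each item is matched
 * against the existing option instances by name, type, port ifindex and
 * array index; the setter is invoked and all touched instances are
 * broadcast back to listeners as a TEAM_CMD_OPTIONS_GET event.
 */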
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		LIST_HEAD(opt_inst_list);
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested_deprecated(opt_attrs,
						  TEAM_ATTR_OPTION_MAX,
						  nl_option,
						  team_nl_option_policy,
						  info->extack);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}

		err = team_nl_send_event_options_get(team, &opt_inst_list);
		if (err)
			break;
	}

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}

static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}
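/*
 * Port dumps mirror the option dumps: each port becomes one
 * TEAM_ATTR_ITEM_PORT nest carrying the ifindex, the CHANGED/REMOVED/LINKUP
 * flags and the last known speed and duplex.
 */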
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants to send a port list
	 * containing only this port. Otherwise go through all listed ports
	 * and send all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}

static const struct genl_small_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_noop,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_options_set,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_options_get,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_port_list_get,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family __ro_after_init = {
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.policy		= team_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= team_nl_ops,
	.n_small_ops	= ARRAY_SIZE(team_nl_ops),
	.mcgrps		= team_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
};

static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int __init team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}
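/*
 * Events are delivered over the TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME
 * multicast group. genlmsg_multicast_netns() returns -ESRCH when nobody
 * listens, which is why the change checkers below treat that error as
 * harmless.
 */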
/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}

/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
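/*
 * Link changes flow in two directions: per-port state is pushed to
 * userspace as a port-list event, while the aggregated state drives the
 * team device's own carrier (unless userspace pinned it via
 * ndo_change_carrier).
 */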
/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_oper_up(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_oper_up(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
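/*
 * A team device is typically created and managed from userspace, either
 * directly via rtnetlink, e.g.:
 *
 *	ip link add name team0 type team
 *	ip link set eth0 master team0
 *
 * or through teamd, which drives the options above over the team generic
 * netlink family. (Interface names here are examples only.)
 */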