// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

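/**
 * dsa_port_set_state - Set the STP state of a port
 * @dp: port whose STP state should change
 * @state: new STP state, one of BR_STATE_*
 * @do_fast_age: whether to flush dynamic FDB entries when leaving the
 *	Learning/Forwarding states for Disabled/Blocking/Listening
 *
 * Programs the new state through ds->ops->port_stp_state_set(). On a
 * Learning/Forwarding to Disabled/Blocking/Listening transition, the FDB is
 * flushed either when the driver cannot configure address learning (stale
 * entries could otherwise linger), or when @do_fast_age is set and learning
 * was enabled on the port.
 */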
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

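/* The _rt ("rtnl") variants below assume the caller already holds the rtnl
 * mutex; dsa_port_enable() and dsa_port_disable() are convenience wrappers
 * which take it themselves.
 */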
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing the vlan_filtering setting when slave ports leave
	 * it. That is a good thing, because it lets us handle the transition
	 * ourselves and also handle the case where the switch's vlan_filtering
	 * setting is global (not per port). When that happens, the correct
	 * moment to trigger the vlan_filtering callback is only when the last
	 * port leaves the last VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

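/* dsa_port_switchdev_sync_attrs() and dsa_port_switchdev_unsync_attrs() are
 * mirror images: joining a bridge inherits its STP state, VLAN awareness,
 * ageing time and brport flags, while leaving restores the standalone
 * defaults (learning off, everything flooded, STP state forwarding).
 */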
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. In fact we even prefer it that way,
	 * because otherwise some setups might never get the notification they
	 * need. For example, when a port leaves a LAG that offloads the
	 * bridge, it becomes standalone, but as far as the bridge is
	 * concerned, no port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* The port has left the bridge, which put it in BR_STATE_DISABLED, so
	 * move it to BR_STATE_FORWARDING to keep it functional in standalone
	 * mode.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}

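/* Joining a bridge is a three-step sequence, and each step is rolled back in
 * reverse order on failure: look up or allocate the refcounted dsa_bridge,
 * notify the fabric through DSA_NOTIFIER_BRIDGE_JOIN, then offload the bridge
 * port via switchdev and replay (sync) the bridge attributes on it.
 */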
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
		.extack = extack,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

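/* Example of the constraint checked below: on a switch where
 * ds->vlan_filtering_is_global is set, a port cannot join br0 with
 * vlan_filtering=1 while another port of the same switch sits in br1 with
 * vlan_filtering=0, because the hardware has a single knob for both.
 */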
/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device or the VID is not found. A return of 0 means
			 * the VID exists in the bridge, which is a failure for
			 * us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

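/* The FDB and MDB helpers below associate each address with a struct dsa_db,
 * the database it belongs to: DSA_DB_BRIDGE for addresses learned/configured
 * on behalf of a bridge, or DSA_DB_PORT for addresses installed for a
 * standalone port. Without FDB isolation, bridge databases are collapsed into
 * one by forcing bridge.num to 0, so refcounting is global across bridges.
 */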
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

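/* Host VLANs are VLANs of the CPU port. Beyond notifying the fabric, they are
 * also committed to the DSA master's VLAN filter (vlan_vid_add/vlan_vid_del
 * below), so that traffic tagged with these VIDs is accepted by the master's
 * RX filtering rather than dropped.
 */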
int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

static int dsa_port_assign_master(struct dsa_port *dp,
				  struct net_device *master,
				  struct netlink_ext_ack *extack,
				  bool fail_on_err)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index, err;

	err = ds->ops->port_change_master(ds, port, master, extack);
	if (err && !fail_on_err)
		dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
			port, master->name, ERR_PTR(err));

	if (err && fail_on_err)
		return err;

	dp->cpu_dp = master->dsa_ptr;
	dp->cpu_port_in_lag = netif_is_lag_master(master);

	return 0;
}

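/* dsa_port_change_master() calls the helper above with fail_on_err=true on
 * the way forward and fail_on_err=false when rolling back to the old master,
 * where an error can only be logged since there is nothing left to restore.
 */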
/* Change the dp->cpu_dp affinity for a user port. Note that both cross-chip
 * notifiers and drivers have implicit assumptions about user-to-CPU-port
 * mappings, so we unfortunately cannot delay the deletion of the objects
 * (switchdev, standalone addresses, standalone VLANs) on the old CPU port
 * until the new CPU port has been set up. So we need to completely tear down
 * the old CPU port before changing it, and restore it on errors during the
 * bringup of the new one.
 */
int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
			   struct netlink_ext_ack *extack)
{
	struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
	struct net_device *old_master = dsa_port_to_master(dp);
	struct net_device *dev = dp->slave;
	struct dsa_switch *ds = dp->ds;
	bool vlan_filtering;
	int err, tmp;

	/* Bridges may hold host FDB, MDB and VLAN objects. These need to be
	 * migrated, so dynamically unoffload and later reoffload the bridge
	 * port.
	 */
	if (bridge_dev) {
		dsa_port_pre_bridge_leave(dp, bridge_dev);
		dsa_port_bridge_leave(dp, bridge_dev);
	}

	/* The port might still be VLAN filtering even if it's no longer
	 * under a bridge, either due to ds->vlan_filtering_is_global or
	 * ds->needs_standalone_vlan_filtering. In turn this means VLANs
	 * on the CPU port.
	 */
	vlan_filtering = dsa_port_is_vlan_filtering(dp);
	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, false);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to remove standalone VLANs");
			goto rewind_old_bridge;
		}
	}

	/* Standalone addresses, and addresses of upper interfaces like
	 * VLAN, LAG, HSR need to be migrated.
	 */
	dsa_slave_unsync_ha(dev);

	err = dsa_port_assign_master(dp, master, extack, true);
	if (err)
		goto rewind_old_addrs;

	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, true);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to restore standalone VLANs");
			goto rewind_new_addrs;
		}
	}

	if (bridge_dev) {
		err = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to reoffload bridge");
			goto rewind_new_vlan;
		}
	}

	return 0;

rewind_new_vlan:
	if (vlan_filtering)
		dsa_slave_manage_vlan_filtering(dev, false);

rewind_new_addrs:
	dsa_slave_unsync_ha(dev);

	dsa_port_assign_master(dp, old_master, NULL, false);

	/* Restore the objects on the old CPU port */
rewind_old_addrs:
	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		tmp = dsa_slave_manage_vlan_filtering(dev, true);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to restore standalone VLANs: %pe\n",
				dp->index, ERR_PTR(tmp));
		}
	}

rewind_old_bridge:
	if (bridge_dev) {
		tmp = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to rejoin bridge %s: %pe\n",
				dp->index, bridge_dev->name, ERR_PTR(tmp));
		}
	}

	return err;
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

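/* Each phylink MAC operation above simply delegates to the corresponding
 * ds->ops->phylink_* method if the driver provides one; mac_link_down and
 * mac_link_up fall back to the legacy ->adjust_link callback when a PHY is
 * attached.
 */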
int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	struct phylink *pl;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
			    mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
		return PTR_ERR(pl);
	}

	dp->pl = pl;

	return 0;
}

void dsa_port_phylink_destroy(struct dsa_port *dp)
{
	phylink_destroy(dp->pl);
	dp->pl = NULL;
}

static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	dsa_port_phylink_destroy(dp);
	return err;
}

/* During the initial DSA driver migration to OF, port nodes were sometimes
 * added to device trees with no indication of how they should operate from a
 * link management perspective (phy-handle, fixed-link, etc). Additionally, the
 * phy-mode may be absent. The interpretation of these port OF nodes depends on
 * their type.
 *
 * User ports with no phy-handle or fixed-link are expected to connect to an
 * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
 * the port number. This description is still actively supported.
 *
 * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
 * operate at the maximum speed that their phy-mode is capable of. If the
 * phy-mode is absent, they are expected to operate using the phy-mode
 * supported by the port that gives the highest link speed. It is unspecified
 * if the port should use flow control or not, half duplex or full duplex, or
 * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be
 * enabled or not.
 *
 * In the latter case of shared ports, omitting the link management description
 * from the firmware node is deprecated and strongly discouraged. DSA uses
 * phylink, which rejects the firmware nodes of these ports for lacking
 * required properties.
 *
 * For switches in this table, DSA will skip enforcing validation and will
 * later omit registering a phylink instance for the shared ports, if they lack
 * a fixed-link, a phy-handle, or a managed = "in-band-status" property.
 * It becomes the responsibility of the driver to ensure that these ports
 * operate at the maximum speed (whatever this means) and will interoperate
 * with the DSA master or other cascade port, since phylink methods will not be
 * invoked for them.
 *
 * If you are considering expanding this table for newly introduced switches,
 * think again. It is OK to remove switches from this table if there aren't DT
 * blobs in circulation which rely on defaulting the shared ports.
 */
static const char * const dsa_switches_apply_workarounds[] = {
#if IS_ENABLED(CONFIG_NET_DSA_XRS700X)
	"arrow,xrs7003e",
	"arrow,xrs7003f",
	"arrow,xrs7004e",
	"arrow,xrs7004f",
#endif
#if IS_ENABLED(CONFIG_B53)
	"brcm,bcm5325",
	"brcm,bcm53115",
	"brcm,bcm53125",
	"brcm,bcm53128",
	"brcm,bcm5365",
	"brcm,bcm5389",
	"brcm,bcm5395",
	"brcm,bcm5397",
	"brcm,bcm5398",
	"brcm,bcm53010-srab",
	"brcm,bcm53011-srab",
	"brcm,bcm53012-srab",
	"brcm,bcm53018-srab",
	"brcm,bcm53019-srab",
	"brcm,bcm5301x-srab",
	"brcm,bcm11360-srab",
	"brcm,bcm58522-srab",
	"brcm,bcm58525-srab",
	"brcm,bcm58535-srab",
	"brcm,bcm58622-srab",
	"brcm,bcm58623-srab",
	"brcm,bcm58625-srab",
	"brcm,bcm88312-srab",
	"brcm,cygnus-srab",
	"brcm,nsp-srab",
	"brcm,omega-srab",
	"brcm,bcm3384-switch",
	"brcm,bcm6328-switch",
	"brcm,bcm6368-switch",
	"brcm,bcm63xx-switch",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2)
	"brcm,bcm7445-switch-v4.0",
	"brcm,bcm7278-switch-v4.0",
	"brcm,bcm7278-switch-v4.8",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP)
	"lantiq,xrx200-gswip",
	"lantiq,xrx300-gswip",
	"lantiq,xrx330-gswip",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060)
	"marvell,mv88e6060",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX)
	"marvell,mv88e6085",
	"marvell,mv88e6190",
	"marvell,mv88e6250",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)
	"microchip,ksz8765",
	"microchip,ksz8794",
	"microchip,ksz8795",
	"microchip,ksz8863",
	"microchip,ksz8873",
	"microchip,ksz9477",
	"microchip,ksz9897",
	"microchip,ksz9893",
	"microchip,ksz9563",
	"microchip,ksz8563",
	"microchip,ksz9567",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO)
	"smsc,lan9303-mdio",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C)
	"smsc,lan9303-i2c",
#endif
	NULL,
};

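/* For reference, a complete link description for a shared port, which the
 * validation below expects, could look like this in the device tree
 * (illustrative fragment only; node and phandle names are made up):
 *
 *	port@5 {
 *		reg = <5>;
 *		ethernet = <&gmac0>;
 *		phy-mode = "rgmii-id";
 *
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */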
static void dsa_shared_port_validate_of(struct dsa_port *dp,
					bool *missing_phy_mode,
					bool *missing_link_description)
{
	struct device_node *dn = dp->dn, *phy_np;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;

	*missing_phy_mode = false;
	*missing_link_description = false;

	if (of_get_phy_mode(dn, &mode)) {
		*missing_phy_mode = true;
		dev_err(ds->dev,
			"OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n",
			dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
	}

	/* Note: of_phy_is_fixed_link() also returns true for
	 * managed = "in-band-status"
	 */
	if (of_phy_is_fixed_link(dn))
		return;

	phy_np = of_parse_phandle(dn, "phy-handle", 0);
	if (phy_np) {
		of_node_put(phy_np);
		return;
	}

	*missing_link_description = true;

	dev_err(ds->dev,
		"OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n",
		dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}

int dsa_shared_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	bool missing_link_description;
	bool missing_phy_mode;
	int port = dp->index;

	dsa_shared_port_validate_of(dp, &missing_phy_mode,
				    &missing_link_description);

	if ((missing_phy_mode || missing_link_description) &&
	    !of_device_compatible_match(ds->dev->of_node,
					dsa_switches_apply_workarounds))
		return -EINVAL;

	if (!ds->ops->adjust_link) {
		if (missing_link_description) {
			dev_warn(ds->dev,
				 "Skipping phylink registration for %s port %d\n",
				 dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
		} else {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);

			return dsa_shared_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_shared_port_fixed_link_register_of(dp);
	else
		return dsa_shared_port_setup_phy_of(dp, true);
}

void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		dsa_port_phylink_destroy(dp);
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_shared_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}