// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"
#include "port.h"
#include "slave.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
		return false;

	/* "See through" shim implementations of the "get" method.
	 * This will clobber the ifreq structure, but we will either return an
	 * error, or the master will overwrite it with proper values.
	 */
	err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
	return err != -EOPNOTSUPP;
}

int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
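
/* Restore the VLAN filtering setting that standalone operation expects when
 * the port leaves the given bridge. On switches where the setting is global
 * (ds->vlan_filtering_is_global), defer the change until no port of the
 * switch is a member of a VLAN-aware bridge anymore.
 */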
static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}
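
/* Replay the attributes that the bridge already holds for this port (brport
 * flags, STP state, VLAN filtering and ageing time) into the hardware, so
 * that the switch state matches the software state at the moment of joining.
 */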
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}
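
/**
 * dsa_port_bridge_join - Let a port join a bridge
 * @dp: port joining the bridge
 * @br: bridge net device being joined
 * @extack: netlink extended ack for error reporting
 *
 * Allocate (or take a reference on) the dsa_bridge structure, notify the
 * fabric of the join, offload the bridge port through switchdev and sync the
 * bridge port attributes to hardware, rolling back on failure.
 */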
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}
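
/* Called when the lower state of the LAG changes (e.g. due to LACP).
 * Recompute whether the port should be part of the LAG's TX set and notify
 * the fabric only on actual changes.
 */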
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
		.extack = extack,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID, is not found; returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
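
/* Change the VLAN awareness of the port or, when the setting is global
 * (ds->vlan_filtering_is_global), of the whole switch. Keeps the affected
 * slave devices in sync through dsa_slave_manage_vlan_filtering(), and
 * restores the old setting if any step fails.
 */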
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}
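
/* FDB entries offloaded on a user port live in the database of the bridge
 * that the port belongs to, and are propagated to the other switches of the
 * tree through the DSA_NOTIFIER_FDB_ADD/DEL events.
 */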
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}
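
/* Host FDB entries are addresses which must be sent to the CPU port. The
 * bridge variant below additionally mirrors the address into the DSA
 * master's unicast filter, so that the master accepts those frames.
 */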
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}
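
/* Host MDB entries direct multicast groups joined on the host towards the
 * CPU port, mirroring the host FDB helpers above; the bridge variants also
 * keep the DSA master's multicast filter in sync.
 */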
static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}
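
/* The MRP (Media Redundancy Protocol) operations are plain pass-throughs to
 * the switch driver; no cross-chip notification is involved.
 */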
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

static int dsa_port_assign_master(struct dsa_port *dp,
				  struct net_device *master,
				  struct netlink_ext_ack *extack,
				  bool fail_on_err)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index, err;

	err = ds->ops->port_change_master(ds, port, master, extack);
	if (err && !fail_on_err)
		dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
			port, master->name, ERR_PTR(err));

	if (err && fail_on_err)
		return err;

	dp->cpu_dp = master->dsa_ptr;
	dp->cpu_port_in_lag = netif_is_lag_master(master);

	return 0;
}

/* Change the dp->cpu_dp affinity for a user port. Note that both cross-chip
 * notifiers and drivers have implicit assumptions about user-to-CPU-port
 * mappings, so we unfortunately cannot delay the deletion of the objects
 * (switchdev, standalone addresses, standalone VLANs) on the old CPU port
 * until the new CPU port has been set up. So we need to completely tear down
 * the old CPU port before changing it, and restore it on errors during the
 * bringup of the new one.
 */
int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
			   struct netlink_ext_ack *extack)
{
	struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
	struct net_device *old_master = dsa_port_to_master(dp);
	struct net_device *dev = dp->slave;
	struct dsa_switch *ds = dp->ds;
	bool vlan_filtering;
	int err, tmp;

	/* Bridges may hold host FDB, MDB and VLAN objects. These need to be
	 * migrated, so dynamically unoffload and later reoffload the bridge
	 * port.
	 */
	if (bridge_dev) {
		dsa_port_pre_bridge_leave(dp, bridge_dev);
		dsa_port_bridge_leave(dp, bridge_dev);
	}

	/* The port might still be VLAN filtering even if it's no longer
	 * under a bridge, either due to ds->vlan_filtering_is_global or
	 * ds->needs_standalone_vlan_filtering. In turn this means VLANs
	 * on the CPU port.
	 */
	vlan_filtering = dsa_port_is_vlan_filtering(dp);
	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, false);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to remove standalone VLANs");
			goto rewind_old_bridge;
		}
	}

	/* Standalone addresses, and addresses of upper interfaces like
	 * VLAN, LAG, HSR need to be migrated.
	 */
	dsa_slave_unsync_ha(dev);

	err = dsa_port_assign_master(dp, master, extack, true);
	if (err)
		goto rewind_old_addrs;

	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, true);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to restore standalone VLANs");
			goto rewind_new_addrs;
		}
	}

	if (bridge_dev) {
		err = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (err && err == -EOPNOTSUPP) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to reoffload bridge");
			goto rewind_new_vlan;
		}
	}

	return 0;

rewind_new_vlan:
	if (vlan_filtering)
		dsa_slave_manage_vlan_filtering(dev, false);

rewind_new_addrs:
	dsa_slave_unsync_ha(dev);

	dsa_port_assign_master(dp, old_master, NULL, false);

/* Restore the objects on the old CPU port */
rewind_old_addrs:
	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		tmp = dsa_slave_manage_vlan_filtering(dev, true);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to restore standalone VLANs: %pe\n",
				dp->index, ERR_PTR(tmp));
		}
	}

rewind_old_bridge:
	if (bridge_dev) {
		tmp = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to rejoin bridge %s: %pe\n",
				dp->index, bridge_dev->name, ERR_PTR(tmp));
		}
	}

	return err;
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}
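
/* The phylink MAC operations below bridge phylink's callbacks onto the
 * optional dsa_switch_ops hooks of the same names, falling back to the
 * legacy adjust_link callback where one is provided.
 */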
static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	/* Skip call for drivers which don't yet set mac_capabilities,
	 * since validating in that case would mean their PHY will advertise
	 * nothing. In turn, skipping validation makes them advertise
	 * everything that the PHY supports, so those drivers should be
	 * converted ASAP.
	 */
	if (config->mac_capabilities)
		phylink_generic_validate(config, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	struct phylink *pl;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
			    mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
		return PTR_ERR(pl);
	}

	dp->pl = pl;

	return 0;
}

void dsa_port_phylink_destroy(struct dsa_port *dp)
{
	phylink_destroy(dp->pl);
	dp->pl = NULL;
}

static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	dsa_port_phylink_destroy(dp);
	return err;
}

/* During the initial DSA driver migration to OF, port nodes were sometimes
 * added to device trees with no indication of how they should operate from a
 * link management perspective (phy-handle, fixed-link, etc). Additionally, the
 * phy-mode may be absent. The interpretation of these port OF nodes depends on
 * their type.
 *
 * User ports with no phy-handle or fixed-link are expected to connect to an
 * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
 * the port number. This description is still actively supported.
 *
 * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
 * operate at the maximum speed that their phy-mode is capable of. If the
 * phy-mode is absent, they are expected to operate using the phy-mode
 * supported by the port that gives the highest link speed. It is unspecified
 * if the port should use flow control or not, half duplex or full duplex, or
 * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be
 * enabled or not.
 *
 * In the latter case of shared ports, omitting the link management description
 * from the firmware node is deprecated and strongly discouraged. DSA uses
 * phylink, which rejects the firmware nodes of these ports for lacking
 * required properties.
 *
 * For switches in this table, DSA will skip enforcing validation and will
 * later omit registering a phylink instance for the shared ports, if they lack
 * a fixed-link, a phy-handle, or a managed = "in-band-status" property.
 * It becomes the responsibility of the driver to ensure that these ports
 * operate at the maximum speed (whatever this means) and will interoperate
 * with the DSA master or other cascade port, since phylink methods will not be
 * invoked for them.
 *
 * If you are considering expanding this table for newly introduced switches,
 * think again. It is OK to remove switches from this table if there aren't DT
 * blobs in circulation which rely on defaulting the shared ports.
 */
static const char * const dsa_switches_apply_workarounds[] = {
#if IS_ENABLED(CONFIG_NET_DSA_XRS700X)
	"arrow,xrs7003e",
	"arrow,xrs7003f",
	"arrow,xrs7004e",
	"arrow,xrs7004f",
#endif
#if IS_ENABLED(CONFIG_B53)
	"brcm,bcm5325",
	"brcm,bcm53115",
	"brcm,bcm53125",
	"brcm,bcm53128",
	"brcm,bcm5365",
	"brcm,bcm5389",
	"brcm,bcm5395",
	"brcm,bcm5397",
	"brcm,bcm5398",
	"brcm,bcm53010-srab",
	"brcm,bcm53011-srab",
	"brcm,bcm53012-srab",
	"brcm,bcm53018-srab",
	"brcm,bcm53019-srab",
	"brcm,bcm5301x-srab",
	"brcm,bcm11360-srab",
	"brcm,bcm58522-srab",
	"brcm,bcm58525-srab",
	"brcm,bcm58535-srab",
	"brcm,bcm58622-srab",
	"brcm,bcm58623-srab",
	"brcm,bcm58625-srab",
	"brcm,bcm88312-srab",
	"brcm,cygnus-srab",
	"brcm,nsp-srab",
	"brcm,omega-srab",
	"brcm,bcm3384-switch",
	"brcm,bcm6328-switch",
	"brcm,bcm6368-switch",
	"brcm,bcm63xx-switch",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2)
	"brcm,bcm7445-switch-v4.0",
	"brcm,bcm7278-switch-v4.0",
	"brcm,bcm7278-switch-v4.8",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP)
	"lantiq,xrx200-gswip",
	"lantiq,xrx300-gswip",
	"lantiq,xrx330-gswip",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060)
	"marvell,mv88e6060",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX)
	"marvell,mv88e6085",
	"marvell,mv88e6190",
	"marvell,mv88e6250",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)
	"microchip,ksz8765",
	"microchip,ksz8794",
	"microchip,ksz8795",
	"microchip,ksz8863",
	"microchip,ksz8873",
	"microchip,ksz9477",
	"microchip,ksz9897",
	"microchip,ksz9893",
	"microchip,ksz9563",
	"microchip,ksz8563",
	"microchip,ksz9567",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO)
	"smsc,lan9303-mdio",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C)
	"smsc,lan9303-i2c",
#endif
	NULL,
};
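
/* Check whether the shared port's OF node carries the properties phylink
 * requires: a phy-mode, and one of phy-handle, fixed-link or managed;
 * report what is missing.
 */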
"CPU" : "DSA", dp->index); 1962 } 1963 1964 int dsa_shared_port_link_register_of(struct dsa_port *dp) 1965 { 1966 struct dsa_switch *ds = dp->ds; 1967 bool missing_link_description; 1968 bool missing_phy_mode; 1969 int port = dp->index; 1970 1971 dsa_shared_port_validate_of(dp, &missing_phy_mode, 1972 &missing_link_description); 1973 1974 if ((missing_phy_mode || missing_link_description) && 1975 !of_device_compatible_match(ds->dev->of_node, 1976 dsa_switches_apply_workarounds)) 1977 return -EINVAL; 1978 1979 if (!ds->ops->adjust_link) { 1980 if (missing_link_description) { 1981 dev_warn(ds->dev, 1982 "Skipping phylink registration for %s port %d\n", 1983 dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index); 1984 } else { 1985 if (ds->ops->phylink_mac_link_down) 1986 ds->ops->phylink_mac_link_down(ds, port, 1987 MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); 1988 1989 return dsa_shared_port_phylink_register(dp); 1990 } 1991 return 0; 1992 } 1993 1994 dev_warn(ds->dev, 1995 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); 1996 1997 if (of_phy_is_fixed_link(dp->dn)) 1998 return dsa_shared_port_fixed_link_register_of(dp); 1999 else 2000 return dsa_shared_port_setup_phy_of(dp, true); 2001 } 2002 2003 void dsa_shared_port_link_unregister_of(struct dsa_port *dp) 2004 { 2005 struct dsa_switch *ds = dp->ds; 2006 2007 if (!ds->ops->adjust_link && dp->pl) { 2008 rtnl_lock(); 2009 phylink_disconnect_phy(dp->pl); 2010 rtnl_unlock(); 2011 dsa_port_phylink_destroy(dp); 2012 return; 2013 } 2014 2015 if (of_phy_is_fixed_link(dp->dn)) 2016 of_phy_deregister_fixed_link(dp->dn); 2017 else 2018 dsa_shared_port_setup_phy_of(dp, false); 2019 } 2020 2021 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr) 2022 { 2023 struct dsa_switch *ds = dp->ds; 2024 int err; 2025 2026 if (!ds->ops->port_hsr_join) 2027 return -EOPNOTSUPP; 2028 2029 dp->hsr_dev = hsr; 2030 2031 err = ds->ops->port_hsr_join(ds, dp->index, hsr); 2032 if (err) 2033 dp->hsr_dev = NULL; 2034 2035 return err; 2036 } 2037 2038 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) 2039 { 2040 struct dsa_switch *ds = dp->ds; 2041 int err; 2042 2043 dp->hsr_dev = NULL; 2044 2045 if (ds->ops->port_hsr_leave) { 2046 err = ds->ops->port_hsr_leave(ds, dp->index, hsr); 2047 if (err) 2048 dev_err(dp->ds->dev, 2049 "port %d failed to leave HSR %s: %pe\n", 2050 dp->index, hsr->name, ERR_PTR(err)); 2051 } 2052 } 2053 2054 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) 2055 { 2056 struct dsa_notifier_tag_8021q_vlan_info info = { 2057 .dp = dp, 2058 .vid = vid, 2059 }; 2060 2061 if (broadcast) 2062 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 2063 2064 return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 2065 } 2066 2067 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) 2068 { 2069 struct dsa_notifier_tag_8021q_vlan_info info = { 2070 .dp = dp, 2071 .vid = vid, 2072 }; 2073 int err; 2074 2075 if (broadcast) 2076 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 2077 else 2078 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 2079 if (err) 2080 dev_err(dp->ds->dev, 2081 "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n", 2082 dp->index, vid, ERR_PTR(err)); 2083 } 2084