// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"
#include "port.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
		return false;

	/* "See through" shim implementations of the "get" method.
	 * This will clobber the ifreq structure, but we will either return an
	 * error, or the master will overwrite it with proper values.
	 */
	err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
	return err != -EOPNOTSUPP;
}
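
/**
 * dsa_port_set_state - Set the STP state of a port
 * @dp: port to configure
 * @state: new STP state, one of the BR_STATE_* values
 * @do_fast_age: whether to flush dynamically learned FDB entries when
 *	leaving the Learning/Forwarding states for Disabled/Blocking/Listening
 *
 * Program the STP state through the driver. Returns -EOPNOTSUPP if the
 * driver does not implement port_stp_state_set, 0 otherwise.
 */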
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
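
/* Restore the VLAN filtering setting that standalone operation requires
 * when @dp leaves @bridge. For switches where this setting is global, only
 * act once the last port has left the last VLAN-aware bridge.
 */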
static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}
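
/* Replay the attributes that the bridge holds for this port (brport flags,
 * STP state, VLAN awareness, ageing time) into the hardware at join time.
 */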
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}
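
/**
 * dsa_port_bridge_join - Offload a port's bridge membership
 * @dp: port joining the bridge
 * @br: bridge net device
 * @extack: netlink extended ack for error reporting
 *
 * Allocate or reference the dsa_bridge structure, notify the fabric of the
 * join, offload the bridge port through switchdev, and sync the bridge
 * attributes into the hardware. Every step is rolled back on failure.
 */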
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}
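
/* Notify the fabric when a LAG port's eligibility for transmission (its
 * membership in the tx set) changes, so drivers can update how the
 * aggregate distributes traffic across its active ports.
 */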
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
		.extack = extack,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL when the device
			 * is not found and -ENOENT when the VID is not found.
			 * A return value of 0 means the VID exists in the
			 * bridge, which for us here is a failure.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
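
/**
 * dsa_port_vlan_filtering - Change a port's VLAN awareness
 * @dp: port whose VLAN awareness is changing
 * @vlan_filtering: new setting
 * @extack: netlink extended ack for error reporting
 *
 * Program the new setting through the driver, then keep the software
 * bookkeeping (per switch if vlan_filtering_is_global, else per port) and
 * the standalone 8021q uppers of the affected slaves consistent with it.
 * On driver or slave errors, the old setting is restored.
 */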
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}
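
/* FDB and MDB entries are offloaded against an address database (struct
 * dsa_db): DSA_DB_BRIDGE for entries configured on behalf of a bridge,
 * DSA_DB_PORT for entries which serve standalone traffic termination.
 * Without FDB isolation, all bridge databases are collapsed into a single
 * one, which is why bridge.num is forced to zero below.
 */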
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}
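
/* Host FDB entries are addresses which need to reach the CPU port. Keep the
 * master's unicast filter in sync as well, so that such frames are not
 * dropped by its MAC before they can be processed.
 */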
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}
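
/* Thin wrappers around the driver hooks for the MRP (Media Redundancy
 * Protocol, IEC 62439-2) objects notified by the bridge through switchdev.
 */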
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

static int dsa_port_assign_master(struct dsa_port *dp,
				  struct net_device *master,
				  struct netlink_ext_ack *extack,
				  bool fail_on_err)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index, err;

	err = ds->ops->port_change_master(ds, port, master, extack);
	if (err && !fail_on_err)
		dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
			port, master->name, ERR_PTR(err));

	if (err && fail_on_err)
		return err;

	dp->cpu_dp = master->dsa_ptr;
	dp->cpu_port_in_lag = netif_is_lag_master(master);

	return 0;
}

/* Change the dp->cpu_dp affinity for a user port. Note that both cross-chip
 * notifiers and drivers have implicit assumptions about user-to-CPU-port
 * mappings, so we unfortunately cannot delay the deletion of the objects
 * (switchdev, standalone addresses, standalone VLANs) on the old CPU port
 * until the new CPU port has been set up. So we need to completely tear down
 * the old CPU port before changing it, and restore it on errors during the
 * bringup of the new one.
 */
int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
			   struct netlink_ext_ack *extack)
{
	struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
	struct net_device *old_master = dsa_port_to_master(dp);
	struct net_device *dev = dp->slave;
	struct dsa_switch *ds = dp->ds;
	bool vlan_filtering;
	int err, tmp;

	/* Bridges may hold host FDB, MDB and VLAN objects. These need to be
	 * migrated, so dynamically unoffload and later reoffload the bridge
	 * port.
	 */
	if (bridge_dev) {
		dsa_port_pre_bridge_leave(dp, bridge_dev);
		dsa_port_bridge_leave(dp, bridge_dev);
	}

	/* The port might still be VLAN filtering even if it's no longer
	 * under a bridge, either due to ds->vlan_filtering_is_global or
	 * ds->needs_standalone_vlan_filtering. In turn this means VLANs
	 * on the CPU port.
	 */
	vlan_filtering = dsa_port_is_vlan_filtering(dp);
	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, false);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to remove standalone VLANs");
			goto rewind_old_bridge;
		}
	}

	/* Standalone addresses, and addresses of upper interfaces like
	 * VLAN, LAG, HSR need to be migrated.
	 */
	dsa_slave_unsync_ha(dev);

	err = dsa_port_assign_master(dp, master, extack, true);
	if (err)
		goto rewind_old_addrs;

	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		err = dsa_slave_manage_vlan_filtering(dev, true);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to restore standalone VLANs");
			goto rewind_new_addrs;
		}
	}

	if (bridge_dev) {
		err = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (err && err == -EOPNOTSUPP) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to reoffload bridge");
			goto rewind_new_vlan;
		}
	}

	return 0;

rewind_new_vlan:
	if (vlan_filtering)
		dsa_slave_manage_vlan_filtering(dev, false);

rewind_new_addrs:
	dsa_slave_unsync_ha(dev);

	dsa_port_assign_master(dp, old_master, NULL, false);

	/* Restore the objects on the old CPU port */
rewind_old_addrs:
	dsa_slave_sync_ha(dev);

	if (vlan_filtering) {
		tmp = dsa_slave_manage_vlan_filtering(dev, true);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to restore standalone VLANs: %pe\n",
				dp->index, ERR_PTR(tmp));
		}
	}

rewind_old_bridge:
	if (bridge_dev) {
		tmp = dsa_port_bridge_join(dp, bridge_dev, extack);
		if (tmp) {
			dev_err(ds->dev,
				"port %d failed to rejoin bridge %s: %pe\n",
				dp->index, bridge_dev->name, ERR_PTR(tmp));
		}
	}

	return err;
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	/* Skip call for drivers which don't yet set mac_capabilities,
	 * since validating in that case would mean their PHY will advertise
	 * nothing. In turn, skipping validation makes them advertise
	 * everything that the PHY supports, so those drivers should be
	 * converted ASAP.
	 */
	if (config->mac_capabilities)
		phylink_generic_validate(config, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
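
/* dsa_port_phylink_validate() above only calls phylink_generic_validate()
 * once a driver reports its MAC capabilities. A minimal sketch of what such
 * a phylink_get_caps implementation could look like, for a hypothetical
 * "foo" driver whose ports support 10/100/1000 over RGMII with pause:
 *
 *	static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
 *					 struct phylink_config *config)
 *	{
 *		config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
 *					   MAC_10 | MAC_100 | MAC_1000FD;
 *		__set_bit(PHY_INTERFACE_MODE_RGMII,
 *			  config->supported_interfaces);
 *	}
 */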
int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	struct phylink *pl;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
			    mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
		return PTR_ERR(pl);
	}

	dp->pl = pl;

	return 0;
}

void dsa_port_phylink_destroy(struct dsa_port *dp)
{
	phylink_destroy(dp->pl);
	dp->pl = NULL;
}

static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	dsa_port_phylink_destroy(dp);
	return err;
}

/* During the initial DSA driver migration to OF, port nodes were sometimes
 * added to device trees with no indication of how they should operate from a
 * link management perspective (phy-handle, fixed-link, etc). Additionally, the
 * phy-mode may be absent. The interpretation of these port OF nodes depends on
 * their type.
 *
 * User ports with no phy-handle or fixed-link are expected to connect to an
 * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
 * the port number. This description is still actively supported.
 *
 * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
 * operate at the maximum speed that their phy-mode is capable of. If the
 * phy-mode is absent, they are expected to operate using the phy-mode
 * supported by the port that gives the highest link speed. It is unspecified
 * if the port should use flow control or not, half duplex or full duplex, or
 * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be
 * enabled or not.
 *
 * In the latter case of shared ports, omitting the link management description
 * from the firmware node is deprecated and strongly discouraged. DSA uses
 * phylink, which rejects the firmware nodes of these ports for lacking
 * required properties.
 *
 * For switches in this table, DSA will skip enforcing validation and will
 * later omit registering a phylink instance for the shared ports, if they lack
 * a fixed-link, a phy-handle, or a managed = "in-band-status" property.
 * It becomes the responsibility of the driver to ensure that these ports
 * operate at the maximum speed (whatever this means) and will interoperate
 * with the DSA master or other cascade port, since phylink methods will not be
 * invoked for them.
 *
 * If you are considering expanding this table for newly introduced switches,
 * think again. It is OK to remove switches from this table if there aren't DT
 * blobs in circulation which rely on defaulting the shared ports.
 */
static const char * const dsa_switches_apply_workarounds[] = {
#if IS_ENABLED(CONFIG_NET_DSA_XRS700X)
	"arrow,xrs7003e",
	"arrow,xrs7003f",
	"arrow,xrs7004e",
	"arrow,xrs7004f",
#endif
#if IS_ENABLED(CONFIG_B53)
	"brcm,bcm5325",
	"brcm,bcm53115",
	"brcm,bcm53125",
	"brcm,bcm53128",
	"brcm,bcm5365",
	"brcm,bcm5389",
	"brcm,bcm5395",
	"brcm,bcm5397",
	"brcm,bcm5398",
	"brcm,bcm53010-srab",
	"brcm,bcm53011-srab",
	"brcm,bcm53012-srab",
	"brcm,bcm53018-srab",
	"brcm,bcm53019-srab",
	"brcm,bcm5301x-srab",
	"brcm,bcm11360-srab",
	"brcm,bcm58522-srab",
	"brcm,bcm58525-srab",
	"brcm,bcm58535-srab",
	"brcm,bcm58622-srab",
	"brcm,bcm58623-srab",
	"brcm,bcm58625-srab",
	"brcm,bcm88312-srab",
	"brcm,cygnus-srab",
	"brcm,nsp-srab",
	"brcm,omega-srab",
	"brcm,bcm3384-switch",
	"brcm,bcm6328-switch",
	"brcm,bcm6368-switch",
	"brcm,bcm63xx-switch",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2)
	"brcm,bcm7445-switch-v4.0",
	"brcm,bcm7278-switch-v4.0",
	"brcm,bcm7278-switch-v4.8",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP)
	"lantiq,xrx200-gswip",
	"lantiq,xrx300-gswip",
	"lantiq,xrx330-gswip",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060)
	"marvell,mv88e6060",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX)
	"marvell,mv88e6085",
	"marvell,mv88e6190",
	"marvell,mv88e6250",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)
	"microchip,ksz8765",
	"microchip,ksz8794",
	"microchip,ksz8795",
	"microchip,ksz8863",
	"microchip,ksz8873",
	"microchip,ksz9477",
	"microchip,ksz9897",
	"microchip,ksz9893",
	"microchip,ksz9563",
	"microchip,ksz8563",
	"microchip,ksz9567",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO)
	"smsc,lan9303-mdio",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C)
	"smsc,lan9303-i2c",
#endif
	NULL,
};

static void dsa_shared_port_validate_of(struct dsa_port *dp,
					bool *missing_phy_mode,
					bool *missing_link_description)
{
	struct device_node *dn = dp->dn, *phy_np;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;

	*missing_phy_mode = false;
	*missing_link_description = false;

	if (of_get_phy_mode(dn, &mode)) {
		*missing_phy_mode = true;
		dev_err(ds->dev,
			"OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n",
			dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
	}

	/* Note: of_phy_is_fixed_link() also returns true for
	 * managed = "in-band-status"
	 */
	if (of_phy_is_fixed_link(dn))
		return;

	phy_np = of_parse_phandle(dn, "phy-handle", 0);
	if (phy_np) {
		of_node_put(phy_np);
		return;
	}

	*missing_link_description = true;

	dev_err(ds->dev,
		"OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n",
		dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}
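
/* Register link management for a shared (CPU or DSA) port: phylink for
 * modern drivers, or the legacy PHYLIB path (fixed-link or a phy-handle
 * driven through adjust_link) for drivers which still implement it.
 * Incomplete OF descriptions are only tolerated for switches listed in
 * dsa_switches_apply_workarounds[].
 */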
"CPU" : "DSA", dp->index); 1961 } 1962 1963 int dsa_shared_port_link_register_of(struct dsa_port *dp) 1964 { 1965 struct dsa_switch *ds = dp->ds; 1966 bool missing_link_description; 1967 bool missing_phy_mode; 1968 int port = dp->index; 1969 1970 dsa_shared_port_validate_of(dp, &missing_phy_mode, 1971 &missing_link_description); 1972 1973 if ((missing_phy_mode || missing_link_description) && 1974 !of_device_compatible_match(ds->dev->of_node, 1975 dsa_switches_apply_workarounds)) 1976 return -EINVAL; 1977 1978 if (!ds->ops->adjust_link) { 1979 if (missing_link_description) { 1980 dev_warn(ds->dev, 1981 "Skipping phylink registration for %s port %d\n", 1982 dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index); 1983 } else { 1984 if (ds->ops->phylink_mac_link_down) 1985 ds->ops->phylink_mac_link_down(ds, port, 1986 MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); 1987 1988 return dsa_shared_port_phylink_register(dp); 1989 } 1990 return 0; 1991 } 1992 1993 dev_warn(ds->dev, 1994 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); 1995 1996 if (of_phy_is_fixed_link(dp->dn)) 1997 return dsa_shared_port_fixed_link_register_of(dp); 1998 else 1999 return dsa_shared_port_setup_phy_of(dp, true); 2000 } 2001 2002 void dsa_shared_port_link_unregister_of(struct dsa_port *dp) 2003 { 2004 struct dsa_switch *ds = dp->ds; 2005 2006 if (!ds->ops->adjust_link && dp->pl) { 2007 rtnl_lock(); 2008 phylink_disconnect_phy(dp->pl); 2009 rtnl_unlock(); 2010 dsa_port_phylink_destroy(dp); 2011 return; 2012 } 2013 2014 if (of_phy_is_fixed_link(dp->dn)) 2015 of_phy_deregister_fixed_link(dp->dn); 2016 else 2017 dsa_shared_port_setup_phy_of(dp, false); 2018 } 2019 2020 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr) 2021 { 2022 struct dsa_switch *ds = dp->ds; 2023 int err; 2024 2025 if (!ds->ops->port_hsr_join) 2026 return -EOPNOTSUPP; 2027 2028 dp->hsr_dev = hsr; 2029 2030 err = ds->ops->port_hsr_join(ds, dp->index, hsr); 2031 if (err) 2032 dp->hsr_dev = NULL; 2033 2034 return err; 2035 } 2036 2037 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) 2038 { 2039 struct dsa_switch *ds = dp->ds; 2040 int err; 2041 2042 dp->hsr_dev = NULL; 2043 2044 if (ds->ops->port_hsr_leave) { 2045 err = ds->ops->port_hsr_leave(ds, dp->index, hsr); 2046 if (err) 2047 dev_err(dp->ds->dev, 2048 "port %d failed to leave HSR %s: %pe\n", 2049 dp->index, hsr->name, ERR_PTR(err)); 2050 } 2051 } 2052 2053 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) 2054 { 2055 struct dsa_notifier_tag_8021q_vlan_info info = { 2056 .dp = dp, 2057 .vid = vid, 2058 }; 2059 2060 if (broadcast) 2061 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 2062 2063 return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); 2064 } 2065 2066 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) 2067 { 2068 struct dsa_notifier_tag_8021q_vlan_info info = { 2069 .dp = dp, 2070 .vid = vid, 2071 }; 2072 int err; 2073 2074 if (broadcast) 2075 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 2076 else 2077 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); 2078 if (err) 2079 dev_err(dp->ds->dev, 2080 "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n", 2081 dp->index, vid, ERR_PTR(err)); 2082 } 2083