// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

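/* Illustration only, not part of this file: a minimal sketch of the driver
 * side that dsa_port_fast_age() and dsa_port_set_state() call into. The
 * mysw_* helpers and register macros are hypothetical; only the
 * dsa_switch_ops signatures are taken from the real API.
 *
 *	static void mysw_port_fast_age(struct dsa_switch *ds, int port)
 *	{
 *		struct mysw_priv *priv = ds->priv;
 *
 *		// Flush dynamically learned ATU entries for this port
 *		mysw_write(priv, MYSW_ATU_OP, MYSW_ATU_FLUSH_PORT(port));
 *	}
 *
 *	static void mysw_port_stp_state_set(struct dsa_switch *ds, int port,
 *					    u8 state)
 *	{
 *		struct mysw_priv *priv = ds->priv;
 *		u32 pstate;
 *
 *		switch (state) {
 *		case BR_STATE_FORWARDING:
 *			pstate = MYSW_PORT_STATE_FORWARD;
 *			break;
 *		case BR_STATE_LEARNING:
 *			pstate = MYSW_PORT_STATE_LEARN;
 *			break;
 *		default:
 *			pstate = MYSW_PORT_STATE_BLOCK;
 *			break;
 *		}
 *
 *		mysw_rmw(priv, MYSW_PORT_CTRL(port), MYSW_PORT_STATE_MASK,
 *			 pstate);
 *	}
 */
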
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
			dp->index, state, ERR_PTR(err));
	}
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

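/* For illustration, the two brport flag configurations the helpers above
 * toggle between (worked example, not additional behavior):
 *
 *	standalone (dsa_port_clear_brport_flags):
 *		BR_LEARNING	off	(no address learning)
 *		BR_FLOOD	on	(flood unknown unicast)
 *		BR_MCAST_FLOOD	on
 *		BR_BCAST_FLOOD	on
 *		BR_PORT_LOCKED	off
 *
 *	bridged (dsa_port_inherit_brport_flags):
 *		each flag mirrors the brport's current setting, applied as
 *		one switchdev operation per flag, so a driver that supports
 *		only a subset can return -EOPNOTSUPP per individual flag.
 */
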
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. We even prefer it that way: otherwise,
	 * some setups might never get the notification they need. For
	 * example, when a port leaves a LAG that offloads the bridge, it
	 * becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* The port has left the bridge and was put in BR_STATE_DISABLED by
	 * the bridge layer, so put it back into BR_STATE_FORWARDING to keep
	 * it functional as a standalone port.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

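/* Worked example of the refcounting above (hypothetical topology): with
 * ds->max_num_bridges = 2, enslaving swp0 and swp1 to br0 and swp2 to br1
 * results in two struct dsa_bridge objects:
 *
 *	br0 -> dsa_bridge { .num = 1, .refcount = 2 }	(swp0, swp1)
 *	br1 -> dsa_bridge { .num = 2, .refcount = 1 }	(swp2)
 *
 * Joining a port to a third bridge makes dsa_bridge_num_get() return 0, so
 * dsa_port_bridge_create() fails with -EOPNOTSUPP ("Range of offloadable
 * bridges exceeded") and that bridge is not offloaded.
 */
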
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

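/* The tx_enabled decision in dsa_port_lag_change() above reduces to this
 * truth table, shown here for illustration:
 *
 *	linfo->link_up	linfo->tx_enabled	in LAG tx set?
 *	no		no			no
 *	no		yes			no  (static LAG, link down)
 *	yes		no			no  (e.g. LACP not converged)
 *	yes		yes			yes
 */
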
/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID, is not found; 0 means
			 * success (the VID exists in the bridge), which is a
			 * failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

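/* Example of the global case above (hypothetical setup): on a switch with
 * ds->vlan_filtering_is_global = true, assume swp0/swp1 are in br0 and
 * swp2/swp3 are in br1:
 *
 *	ip link set br0 type bridge vlan_filtering 1	# OK only if br1
 *							# already has
 *							# vlan_filtering 1
 *
 * If br1 still has vlan_filtering 0, the request is denied with the
 * "VLAN filtering is a global setting" extack, since the switch cannot be
 * VLAN-aware for br0's ports and VLAN-unaware for br1's at the same time.
 */
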
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

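/* Illustration only: DSA_NOTIFIER_MTU ultimately reaches the driver's
 * ds->ops->port_change_mtu(). A minimal sketch with a hypothetical
 * mysw_set_max_frame_size() helper; drivers commonly budget extra room on
 * CPU ports for the tagging protocol overhead:
 *
 *	static int mysw_port_change_mtu(struct dsa_switch *ds, int port,
 *					int new_mtu)
 *	{
 *		struct mysw_priv *priv = ds->priv;
 *		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 *
 *		if (dsa_is_cpu_port(ds, port))
 *			frame_size += ds->dst->tag_ops->needed_headroom;
 *
 *		return mysw_set_max_frame_size(priv, port, frame_size);
 *	}
 */
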
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

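/* Worked example of the bridge.num forcing above: without FDB isolation, an
 * entry for the same {addr, vid} installed from two bridges must share one
 * refcounted hardware entry, so both are keyed as
 * { DSA_DB_BRIDGE, .bridge.num = 0 }. With ds->fdb_isolation = true, the
 * keys stay e.g. { .bridge.num = 1 } and { .bridge.num = 2 }, and the
 * driver is expected to install the address into separate, per-bridge
 * databases.
 */
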
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

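/* Illustration only: a minimal, hypothetical ds->ops->phylink_get_caps()
 * implementation, which is how non-legacy drivers feed dp->pl_config before
 * phylink_create() is called in dsa_port_phylink_create() below:
 *
 *	static void mysw_phylink_get_caps(struct dsa_switch *ds, int port,
 *					  struct phylink_config *config)
 *	{
 *		config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 *					   MAC_10 | MAC_100 | MAC_1000FD;
 *
 *		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
 *			  config->supported_interfaces);
 *		__set_bit(PHY_INTERFACE_MODE_RGMII_ID,
 *			  config->supported_interfaces);
 *	}
 *
 * With mac_capabilities populated and no ds->ops->phylink_validate, the
 * dsa_port_phylink_validate() wrapper above falls through to
 * phylink_generic_validate().
 */
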
int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}

static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

/* During the initial DSA driver migration to OF, port nodes were sometimes
 * added to device trees with no indication of how they should operate from a
 * link management perspective (phy-handle, fixed-link, etc). Additionally, the
 * phy-mode may be absent. The interpretation of these port OF nodes depends on
 * their type.
 *
 * User ports with no phy-handle or fixed-link are expected to connect to an
 * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
 * the port number. This description is still actively supported.
 *
 * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
 * operate at the maximum speed that their phy-mode is capable of. If the
 * phy-mode is absent, they are expected to operate using the phy-mode
 * supported by the port that gives the highest link speed. It is unspecified
 * if the port should use flow control or not, half duplex or full duplex, or
 * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be
 * enabled or not.
 *
 * In the latter case of shared ports, omitting the link management description
 * from the firmware node is deprecated and strongly discouraged. DSA uses
 * phylink, which rejects the firmware nodes of these ports for lacking
 * required properties.
 *
 * For switches in this table, DSA will skip enforcing validation and will
 * later omit registering a phylink instance for the shared ports, if they lack
 * a fixed-link, a phy-handle, or a managed = "in-band-status" property.
 * It becomes the responsibility of the driver to ensure that these ports
 * operate at the maximum speed (whatever this means) and will interoperate
 * with the DSA master or other cascade port, since phylink methods will not be
 * invoked for them.
 *
 * If you are considering expanding this table for newly introduced switches,
 * think again. It is OK to remove switches from this table if there aren't DT
 * blobs in circulation which rely on defaulting the shared ports.
 */
static const char * const dsa_switches_apply_workarounds[] = {
#if IS_ENABLED(CONFIG_NET_DSA_XRS700X)
	"arrow,xrs7003e",
	"arrow,xrs7003f",
	"arrow,xrs7004e",
	"arrow,xrs7004f",
#endif
#if IS_ENABLED(CONFIG_B53)
	"brcm,bcm5325",
	"brcm,bcm53115",
	"brcm,bcm53125",
	"brcm,bcm53128",
	"brcm,bcm5365",
	"brcm,bcm5389",
	"brcm,bcm5395",
	"brcm,bcm5397",
	"brcm,bcm5398",
	"brcm,bcm53010-srab",
	"brcm,bcm53011-srab",
	"brcm,bcm53012-srab",
	"brcm,bcm53018-srab",
	"brcm,bcm53019-srab",
	"brcm,bcm5301x-srab",
	"brcm,bcm11360-srab",
	"brcm,bcm58522-srab",
	"brcm,bcm58525-srab",
	"brcm,bcm58535-srab",
	"brcm,bcm58622-srab",
	"brcm,bcm58623-srab",
	"brcm,bcm58625-srab",
	"brcm,bcm88312-srab",
	"brcm,cygnus-srab",
	"brcm,nsp-srab",
	"brcm,omega-srab",
	"brcm,bcm3384-switch",
	"brcm,bcm6328-switch",
	"brcm,bcm6368-switch",
	"brcm,bcm63xx-switch",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2)
	"brcm,bcm7445-switch-v4.0",
	"brcm,bcm7278-switch-v4.0",
	"brcm,bcm7278-switch-v4.8",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP)
	"lantiq,xrx200-gswip",
	"lantiq,xrx300-gswip",
	"lantiq,xrx330-gswip",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060)
	"marvell,mv88e6060",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX)
	"marvell,mv88e6085",
	"marvell,mv88e6190",
	"marvell,mv88e6250",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)
	"microchip,ksz8765",
	"microchip,ksz8794",
	"microchip,ksz8795",
	"microchip,ksz8863",
	"microchip,ksz8873",
	"microchip,ksz9477",
	"microchip,ksz9897",
	"microchip,ksz9893",
	"microchip,ksz9563",
	"microchip,ksz8563",
	"microchip,ksz9567",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO)
	"smsc,lan9303-mdio",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C)
	"smsc,lan9303-i2c",
#endif
	NULL,
};

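/* For reference, a link description that passes the validation below looks
 * like this in the device tree (illustrative fragment, not tied to any
 * particular switch binding):
 *
 *	port@5 {
 *		reg = <5>;
 *		ethernet = <&eth0>;
 *		phy-mode = "rgmii-id";
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */
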
static void dsa_shared_port_validate_of(struct dsa_port *dp,
					bool *missing_phy_mode,
					bool *missing_link_description)
{
	struct device_node *dn = dp->dn, *phy_np;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;

	*missing_phy_mode = false;
	*missing_link_description = false;

	if (of_get_phy_mode(dn, &mode)) {
		*missing_phy_mode = true;
		dev_err(ds->dev,
			"OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n",
			dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
	}

	/* Note: of_phy_is_fixed_link() also returns true for
	 * managed = "in-band-status"
	 */
	if (of_phy_is_fixed_link(dn))
		return;

	phy_np = of_parse_phandle(dn, "phy-handle", 0);
	if (phy_np) {
		of_node_put(phy_np);
		return;
	}

	*missing_link_description = true;

	dev_err(ds->dev,
		"OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n",
		dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}

int dsa_shared_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	bool missing_link_description;
	bool missing_phy_mode;
	int port = dp->index;

	dsa_shared_port_validate_of(dp, &missing_phy_mode,
				    &missing_link_description);

	if ((missing_phy_mode || missing_link_description) &&
	    !of_device_compatible_match(ds->dev->of_node,
					dsa_switches_apply_workarounds))
		return -EINVAL;

	if (!ds->ops->adjust_link) {
		if (missing_link_description) {
			dev_warn(ds->dev,
				 "Skipping phylink registration for %s port %d\n",
				 dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
		} else {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);

			return dsa_shared_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_shared_port_fixed_link_register_of(dp);
	else
		return dsa_shared_port_setup_phy_of(dp, true);
}

void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_shared_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}