// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}
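
/* Illustrative sketch (not part of this file): a driver's
 * .port_stp_state_set() implementation typically maps the bridge port
 * state onto a hardware port-state field. The foo_* names and register
 * values below are made up for the example:
 *
 *	static void foo_port_stp_state_set(struct dsa_switch *ds, int port,
 *					   u8 state)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *		u8 hw_state;
 *
 *		switch (state) {
 *		case BR_STATE_DISABLED:
 *			hw_state = FOO_PORT_STATE_DISABLED;
 *			break;
 *		case BR_STATE_BLOCKING:
 *		case BR_STATE_LISTENING:
 *			hw_state = FOO_PORT_STATE_BLOCKING;
 *			break;
 *		case BR_STATE_LEARNING:
 *			hw_state = FOO_PORT_STATE_LEARNING;
 *			break;
 *		default:
 *			hw_state = FOO_PORT_STATE_FORWARDING;
 *			break;
 *		}
 *
 *		foo_port_state_write(priv, port, hw_state);
 *	}
 */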

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}
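
/* Summary of the decision above (illustrative): the VLAN awareness a port
 * needs in standalone mode is driver-dependent, so leaving a bridge only
 * triggers a change when the two settings disagree:
 *
 *	bridge being left	ds->needs_standalone_vlan_filtering	action
 *	-----------------	-----------------------------------	-------
 *	vlan_filtering=0	true					enable
 *	vlan_filtering=1	false					disable
 *	(settings agree)	either					none
 */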

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}
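
/* Illustrative sketch (not part of this file): a driver's
 * .port_bridge_flags() typically toggles per-port learning and flooding
 * bits. The foo_* helpers below are made up:
 *
 *	static int foo_port_bridge_flags(struct dsa_switch *ds, int port,
 *					 struct switchdev_brport_flags flags,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		if (flags.mask & BR_LEARNING)
 *			foo_port_set_learning(priv, port,
 *					      !!(flags.val & BR_LEARNING));
 *
 *		if (flags.mask & BR_FLOOD)
 *			foo_port_set_ucast_flood(priv, port,
 *						 !!(flags.val & BR_FLOOD));
 *
 *		return 0;
 *	}
 */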

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
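
/* Illustrative sketch (not part of this file): a driver reacting to
 * DSA_NOTIFIER_LAG_CHANGE typically includes the port in its LAG TX mask
 * only while dp->lag_tx_enabled is true. The foo_lag_set_port_active()
 * helper below is made up:
 *
 *	static int foo_port_lag_change(struct dsa_switch *ds, int port)
 *	{
 *		struct dsa_port *dp = dsa_to_port(ds, port);
 *
 *		return foo_lag_set_port_active(ds->priv, port,
 *					       dp->lag_tx_enabled);
 *	}
 */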

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if
			 * the device or the VID is not found; returning 0
			 * means the VID exists in the bridge, which is a
			 * failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
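
/* Example of the conflict rejected above (illustrative): with
 * vlan_filtering=0, the user may hold a VLAN upper such as swp0.100 on a
 * port whose bridge also has VID 100 in its VLAN table. With VLAN
 * awareness turned on, both the 8021q upper and the bridge would claim
 * frames with VID 100, so the request is denied until the upper is
 * removed.
 */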

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}
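
/* Illustrative sketch (not part of this file): a driver's
 * .port_vlan_filtering() usually just flips the port between 802.1Q mode
 * and port-based VLAN mode. The foo_port_set_8021q_mode() helper is made
 * up:
 *
 *	static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
 *					   bool vlan_filtering,
 *					   struct netlink_ext_ack *extack)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		return foo_port_set_8021q_mode(priv, port, vlan_filtering);
 *	}
 */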

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}
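
/* Illustrative sketch (not part of this file): in a driver's
 * .port_fdb_add(), db.bridge.num can select a per-bridge address database
 * when ds->fdb_isolation is set; as forced above, it is always 0
 * otherwise, so all bridges share one database. A real driver would also
 * check db.type; the foo_atu_load() helper is made up:
 *
 *	static int foo_port_fdb_add(struct dsa_switch *ds, int port,
 *				    const unsigned char *addr, u16 vid,
 *				    struct dsa_db db)
 *	{
 *		u16 fid = db.bridge.num;
 *
 *		return foo_atu_load(ds->priv, fid, addr, vid, port);
 *	}
 */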

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}
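
/* Illustrative sketch (not part of this file): a driver's .port_vlan_add()
 * receives the VID together with the bridge VLAN flags. The foo_vlan_add()
 * helper is made up:
 *
 *	static int foo_port_vlan_add(struct dsa_switch *ds, int port,
 *				     const struct switchdev_obj_port_vlan *vlan,
 *				     struct netlink_ext_ack *extack)
 *	{
 *		bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 *		bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 *
 *		return foo_vlan_add(ds->priv, port, vlan->vid,
 *				    untagged, pvid);
 *	}
 */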

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}
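
/* Illustrative sketch (not part of this file): .phylink_mac_config()
 * selects the interface mode; speed and duplex are not valid here and
 * belong in .phylink_mac_link_up(). The foo_port_set_interface() helper
 * is made up:
 *
 *	static void foo_phylink_mac_config(struct dsa_switch *ds, int port,
 *					   unsigned int mode,
 *					   const struct phylink_link_state *state)
 *	{
 *		foo_port_set_interface(ds->priv, port, state->interface);
 *	}
 */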

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}
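
/* Illustrative sketch (not part of this file): non-legacy drivers describe
 * the MAC through .phylink_get_caps(), which lets the generic
 * dsa_port_phylink_validate() path above work without a custom
 * .phylink_validate(). The capabilities shown are an example only:
 *
 *	static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
 *					 struct phylink_config *config)
 *	{
 *		config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
 *					   MAC_10 | MAC_100 | MAC_1000FD;
 *
 *		__set_bit(PHY_INTERFACE_MODE_RGMII,
 *			  config->supported_interfaces);
 *	}
 */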

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			of_node_put(phy_np);
			return dsa_port_phylink_register(dp);
		}
		of_node_put(phy_np);
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}