// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		/* flush all VLANs */
		.vid = 0,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	dsa_port_notify_bridge_fdb_flush(dp);
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}
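/**
 * dsa_port_set_state - Set the STP state of a port
 * @dp: port to configure
 * @state: new STP state (BR_STATE_*)
 * @do_fast_age: whether to flush dynamically learned FDB entries when
 *	leaving a state where learning was enabled for one where it isn't
 *
 * Program the given STP state into the switch. The FDB is fast-aged when
 * the port moves from Learning or Forwarding to Disabled, Blocking or
 * Listening, either because @do_fast_age was requested and learning was on,
 * or because the switch cannot turn off learning per port and may thus have
 * learned addresses regardless of the brport flags.
 */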
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}
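/* For reference, a minimal sketch of the driver-side callbacks behind
 * dsa_port_pre_bridge_flags()/dsa_port_bridge_flags() as exercised by the
 * helpers above. The example_* names are hypothetical and not part of this
 * file; the point is the split between the _pre_ (validate only) and the
 * plain (commit to hardware) callback:
 *
 *	static int example_port_pre_bridge_flags(struct dsa_switch *ds,
 *						 int port,
 *						 struct switchdev_brport_flags flags,
 *						 struct netlink_ext_ack *extack)
 *	{
 *		// Reject flags the hardware cannot offload; touch nothing.
 *		if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 *
 *	static int example_port_bridge_flags(struct dsa_switch *ds, int port,
 *					     struct switchdev_brport_flags flags,
 *					     struct netlink_ext_ack *extack)
 *	{
 *		struct example_sw_priv *priv = ds->priv;
 *
 *		if (flags.mask & BR_LEARNING)
 *			example_sw_set_learning(priv, port,
 *						!!(flags.val & BR_LEARNING));
 *		if (flags.mask & BR_FLOOD)
 *			example_sw_set_ucast_flood(priv, port,
 *						   !!(flags.val & BR_FLOOD));
 *
 *		return 0;
 *	}
 */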
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}
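/**
 * dsa_port_bridge_join - Offload a port joining a bridge
 * @dp: port joining the bridge
 * @br: bridge net device being joined
 * @extack: netlink extended ack for error reporting
 *
 * Look up or allocate the refcounted dsa_bridge structure, notify all
 * switches in the fabric of the join, offload the bridge port through
 * switchdev, and finally sync the current bridge attributes (brport flags,
 * STP state, VLAN filtering, ageing time) into the hardware. Each step is
 * rolled back on failure.
 */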
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp);
}
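/**
 * dsa_port_lag_change - Notify the fabric of a LAG member state change
 * @dp: port whose lower state changed
 * @linfo: lower state info reported by the LAG driver
 *
 * Recompute whether this port should carry TX traffic for its LAG and, if
 * that changed, emit DSA_NOTIFIER_LAG_CHANGE so that drivers can update
 * their set of active TX ports.
 */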
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}
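/* Two independent conditions gate a VLAN awareness change below:
 *
 * 1. A user port cannot become VLAN-aware while any of its 8021q uppers
 *    has a VID that is also configured in the bridge, since the same VID
 *    would then be claimed by two different entities (e.g. a lan0.100
 *    upper and VID 100 of the VLAN-aware bridge).
 * 2. On switches where VLAN awareness is a global chip setting, all
 *    bridges spanning ports of the chip must agree on vlan_filtering
 *    before it may be toggled.
 */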
/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			/* Use the slave of the port being iterated, not of
			 * the port on which the change was requested.
			 */
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}
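/* For reference, a minimal sketch of a hypothetical driver's
 * .port_vlan_filtering callback as invoked above. example_sw_priv and
 * example_sw_set_vlan_aware are made-up names; the point is that the
 * callback only touches the hardware and reports errors, while the DSA
 * core (dsa_port_vlan_filtering) owns the bookkeeping and the rollback:
 *
 *	static int example_port_vlan_filtering(struct dsa_switch *ds, int port,
 *					       bool vlan_filtering,
 *					       struct netlink_ext_ack *extack)
 *	{
 *		struct example_sw_priv *priv = ds->priv;
 *
 *		return example_sw_set_vlan_aware(priv, port, vlan_filtering);
 *	}
 */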
/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.targeted_match = targeted_match,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}
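/* The FDB and MDB helpers below address entries to a database (struct
 * dsa_db): DSA_DB_BRIDGE for entries belonging to a bridge, keyed by
 * bridge.num, and DSA_DB_PORT for entries belonging to a standalone port.
 * When the switch does not support FDB isolation, bridge.num is forced to
 * zero so that refcounting is shared across all bridges.
 */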
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}
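/* FDB entries offloaded against a LAG are tracked on the dsa_lag itself
 * (lag->fdbs, serialized by lag->fdb_lock, see dsa_port_lag_create()),
 * since they are not tied to a single physical port. The list is expected
 * to be empty again by the time the LAG is destroyed.
 */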
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}
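/* Host MDB entries are installed on the CPU port so that multicast reaches
 * the software bridge; for entries belonging to a bridge, the address is
 * additionally synced to the DSA master's multicast filter with
 * dev_mc_add()/dev_mc_del(), since the master is the device that actually
 * receives the traffic.
 */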
static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}
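/* Resolve the "phy-handle" OF property of a port. Returns NULL when no PHY
 * is described, ERR_PTR(-EPROBE_DEFER) when one is described but not yet
 * bound to a driver, and a referenced phy_device otherwise; callers must
 * drop the reference with put_device().
 */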
static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}
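/* The two link notification callbacks below fall back to the legacy PHYLIB
 * .adjust_link operation for drivers that have not been converted to
 * phylink yet, but only when a PHY is attached (user ports); see also the
 * migration warning in dsa_port_link_register_of().
 */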
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}
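/* For reference, a minimal sketch of the .phylink_get_caps operation
 * consumed above, for a hypothetical driver (the example_* name is made
 * up). Filling in mac_capabilities is also what enables the generic
 * validation path in dsa_port_phylink_validate():
 *
 *	static void example_phylink_get_caps(struct dsa_switch *ds, int port,
 *					     struct phylink_config *config)
 *	{
 *		__set_bit(PHY_INTERFACE_MODE_RGMII_ID,
 *			  config->supported_interfaces);
 *		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
 *			  config->supported_interfaces);
 *
 *		config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 *					   MAC_10 | MAC_100 | MAC_1000FD;
 *	}
 */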
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
				      &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}