// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

static int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct raw_notifier_head *nh = &dst->nh;

		err = raw_notifier_call_chain(nh, e, v);
		err = notifier_to_errno(err);
		if (err)
			break;
	}

	return err;
}

static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dp->ds->dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

int dsa_port_set_state(struct dsa_port *dp, u8 state,
		       struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (switchdev_trans_ph_prepare(trans))
		return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;

	if (ds->ops->port_stp_state_set)
		ds->ops->port_stp_state_set(ds, port, state);

	if (ds->ops->port_fast_age) {
		/* Fast age FDB entries or flush the appropriate forwarding
		 * database for the given port, if we are moving it from
		 * Learning or Forwarding state to Disabled, Blocking or
		 * Listening state.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			ds->ops->port_fast_age(ds, port);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
{
	int err;

	err = dsa_port_set_state(dp, state, NULL);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

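/* Called when a user port joins a bridge, typically from the slave
 * interface's CHANGEUPPER handler. Flooding towards unknown destinations is
 * enabled before the join notification is broadcast, so a freshly bridged
 * port behaves like a standard software bridge port by default.
 */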
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Set the flooding mode before joining the port in the switch */
	err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD, NULL);
	if (err)
		return err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);

	/* The bridging is rolled back on error */
	if (err) {
		dsa_port_bridge_flags(dp, 0, NULL);
		dp->bridge_dev = NULL;
	}

	return err;
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	/* Port is leaving the bridge, disable flooding */
	dsa_port_bridge_flags(dp, 0, NULL);

	/* The port left the bridge and was put in BR_STATE_DISABLED by the
	 * bridge layer, so put it back in BR_STATE_FORWARDING to keep it
	 * functional as a standalone port.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers; those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID, is not found. A return
			 * of 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				dev_err(ds->dev, "Must remove upper %s first\n",
					upper_dev->name);
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has the same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			dev_err(ds->dev, "VLAN filtering is a global setting\n");
			return false;
		}
	}
	return true;
}

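/* Toggling VLAN awareness is a two-phase switchdev operation: in the
 * prepare phase the core checks that the request can be honoured (driver
 * support, no conflicting 8021q uppers, global vs. per-port filtering);
 * the driver callback receives the transaction object in both phases and
 * the cached state is only updated on commit.
 */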
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (switchdev_trans_ph_prepare(trans)) {
		bool apply;

		if (!ds->ops->port_vlan_filtering)
			return -EOPNOTSUPP;

		/* We are called from dsa_slave_switchdev_blocking_event(),
		 * which is not under rcu_read_lock(), unlike
		 * dsa_slave_switchdev_event().
		 */
		rcu_read_lock();
		apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering);
		rcu_read_unlock();
		if (!apply)
			return -EINVAL;
	}

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   trans);
	if (err)
		return err;

	if (switchdev_trans_ph_commit(trans)) {
		if (ds->vlan_filtering_is_global)
			ds->vlan_filtering = vlan_filtering;
		else
			dp->vlan_filtering = vlan_filtering;
	}

	return 0;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
			 struct switchdev_trans *trans)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info = {
		.ageing_time = ageing_time,
		.trans = trans,
	};

	if (switchdev_trans_ph_prepare(trans))
		return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);

	dp->ageing_time = ageing_time;

	return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
			      struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_egress_floods ||
	    (flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
		return -EINVAL;

	return 0;
}

int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
			  struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (ds->ops->port_egress_floods)
		err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
						  flags & BR_MCAST_FLOOD);

	return err;
}

int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
		     struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (switchdev_trans_ph_prepare(trans))
		return ds->ops->port_egress_floods ? 0 : -EOPNOTSUPP;

	return ds->ops->port_egress_floods(ds, port, true, mrouter);
}

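/* The helpers below do not program the hardware directly: each one wraps
 * its arguments in a dsa_notifier_*_info structure and lets the switch
 * notifier chain dispatch the operation to the right switch and port,
 * including any cross-chip propagation the event may require.
 */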
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool propagate_upstream)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.propagate_upstream = propagate_upstream,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

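/* phylink MAC operations. Each callback resolves the dsa_port from the
 * embedded phylink_config and forwards the call to the matching
 * dsa_switch_ops hook, falling back to the legacy adjust_link callback
 * (or to a safe default such as reporting the link as down) when the
 * driver does not implement it.
 */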
static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

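/* Register the fixed link described under this port's device tree node.
 * An illustrative binding (not taken from any particular board) looks
 * roughly like this:
 *
 *	port@5 {
 *		reg = <5>;
 *		phy-mode = "rgmii-id";
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */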
static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

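/* Decide how the link on this port is managed: drivers that still
 * implement adjust_link keep the legacy PHYLIB paths above, everyone else
 * gets a phylink instance when the port has a fixed link or a phy-handle,
 * and ports with neither are left alone.
 */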
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);