/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    phy_duplex_to_str(phydev->duplex),
			    phydev->pause ? "rx/tx" : "off");
	} else {
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}
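
/*
 * Illustrative sketch (not part of the original file): phy_clear_interrupt()
 * and phy_config_interrupt() above simply dispatch to a PHY driver's
 * ack_interrupt/config_intr callbacks. A minimal driver implementation,
 * assuming hypothetical register/bit names FOO_INTR_STATUS, FOO_INTR_MASK
 * and FOO_INTR_ALL, might look roughly like:
 *
 *	static int foo_ack_interrupt(struct phy_device *phydev)
 *	{
 *		// reading the latched status register clears the interrupt
 *		int err = phy_read(phydev, FOO_INTR_STATUS);
 *
 *		return (err < 0) ? err : 0;
 *	}
 *
 *	static int foo_config_intr(struct phy_device *phydev)
 *	{
 *		if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 *			return phy_write(phydev, FOO_INTR_MASK, FOO_INTR_ALL);
 *
 *		return phy_write(phydev, FOO_INTR_MASK, 0);
 *	}
 */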

/**
 * phy_restart_aneg - restart auto-negotiation
 * @phydev: target phy_device struct
 *
 * Restart the autonegotiation on @phydev. Returns >= 0 on success or
 * negative errno on error.
 */
int phy_restart_aneg(struct phy_device *phydev)
{
	int ret;

	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		ret = genphy_c45_restart_aneg(phydev);
	else
		ret = genphy_restart_aneg(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_restart_aneg);

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev.
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv && phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
	 * implement Clause 22 registers
	 */
	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		return -EINVAL;

	return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);

/**
 * phy_find_valid - find a PHY setting that matches the requested parameters
 * @speed: desired speed
 * @duplex: desired duplex
 * @supported: mask of supported link modes
 *
 * Locate a supported phy setting that is, in priority order:
 * - an exact match for the specified speed and duplex mode
 * - a match for the specified speed, or slower speed
 * - the slowest supported speed
 * Returns the matched phy_setting entry, or %NULL if no supported phy
 * settings were found.
 */
static const struct phy_setting *
phy_find_valid(int speed, int duplex, u32 supported)
{
	unsigned long mask = supported;

	return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false);
}

/**
 * phy_supported_speeds - return all speeds currently supported by a phy device
 * @phy: The phy device to return supported speeds of.
 * @speeds: buffer to store supported speeds in.
 * @size: size of speeds buffer.
 *
 * Description: Returns the number of supported speeds, and fills the speeds
 * buffer with the supported speeds. If the speeds buffer is too small to
 * contain all currently supported speeds, will return as many speeds as
 * can fit.
 */
unsigned int phy_supported_speeds(struct phy_device *phy,
				  unsigned int *speeds,
				  unsigned int size)
{
	unsigned long supported = phy->supported;

	return phy_speeds(speeds, size, &supported, BITS_PER_LONG);
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned long mask = features;

	return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes. Drop down by one in this order: 1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	const struct phy_setting *setting;
	u32 features = phydev->supported;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	setting = phy_find_valid(phydev->speed, phydev->duplex, features);
	if (setting) {
		phydev->speed = setting->speed;
		phydev->duplex = setting->duplex;
	} else {
		/* We failed to find anything (no supported speeds?) */
		phydev->speed = SPEED_UNKNOWN;
		phydev->duplex = DUPLEX_UNKNOWN;
	}
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 *
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->mdio.addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	phydev->speed = speed;

	phydev->advertising = advertising;

	if (autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = duplex;

	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

void phy_ethtool_ksettings_get(struct phy_device *phydev,
			       struct ethtool_link_ksettings *cmd)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						phydev->supported);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						phydev->advertising);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						phydev->lp_advertising);

	cmd->base.speed = phydev->speed;
	cmd->base.duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->base.port = PORT_BNC;
	else
		cmd->base.port = PORT_MII;
	cmd->base.transceiver = phy_is_internal(phydev) ?
				XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->base.phy_address = phydev->mdio.addr;
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->base.eth_tp_mdix = phydev->mdix;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->mdio.addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->mdio.bus,
						 mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->mdio.addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->mdio.addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv && phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);

static int phy_config_aneg(struct phy_device *phydev)
{
	if (phydev->drv->config_aneg)
		return phydev->drv->config_aneg(phydev);

	/* Clause 45 PHYs that don't implement Clause 22 registers are not
	 * allowed to call genphy_config_aneg()
	 */
	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		return -EOPNOTSUPP;

	return genphy_config_aneg(phydev);
}
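
/*
 * Illustrative sketch (not part of the original file): a MAC driver typically
 * forwards SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG from its ndo_do_ioctl hook
 * straight to phy_mii_ioctl(). Something along these lines, with foo_ioctl
 * being a hypothetical driver callback:
 *
 *	static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 *	{
 *		if (!netif_running(ndev) || !ndev->phydev)
 *			return -EINVAL;
 *
 *		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
 *	}
 */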

/**
 * phy_start_aneg_priv - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
{
	bool trigger = 0;
	int err;

	if (!phydev->drv)
		return -EIO;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phy_config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

	/* Re-schedule a PHY state machine to check PHY status because
	 * negotiation may already be done and aneg interrupt may not be
	 * generated.
	 */
	if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
		err = phy_aneg_done(phydev);
		if (err > 0) {
			trigger = true;
			err = 0;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);

	if (trigger)
		phy_trigger_machine(phydev, sync);

	return err;
}

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	return phy_start_aneg_priv(phydev, true);
}
EXPORT_SYMBOL(phy_start_aneg);

static int phy_poll_aneg_done(struct phy_device *phydev)
{
	unsigned int retries = 100;
	int ret;

	do {
		msleep(100);
		ret = phy_aneg_done(phydev);
	} while (!ret && --retries);

	if (!ret)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}

/**
 * phy_speed_down - set speed to lowest speed supported by both link partners
 * @phydev: the phy_device struct
 * @sync: perform action synchronously
 *
 * Description: Typically used to save energy when waiting for a WoL packet
 *
 * WARNING: Setting sync to false may cause the system to be unable to suspend
 * if the PHY generates an interrupt when finishing the autonegotiation.
 * This interrupt may wake up the system immediately after suspend.
 * Therefore use sync = false only if you're sure it's safe with the respective
 * network chip.
 */
int phy_speed_down(struct phy_device *phydev, bool sync)
{
	u32 adv = phydev->lp_advertising & phydev->supported;
	u32 adv_old = phydev->advertising;
	int ret;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return 0;

	if (adv & PHY_10BT_FEATURES)
		phydev->advertising &= ~(PHY_100BT_FEATURES |
					 PHY_1000BT_FEATURES);
	else if (adv & PHY_100BT_FEATURES)
		phydev->advertising &= ~PHY_1000BT_FEATURES;

	if (phydev->advertising == adv_old)
		return 0;

	ret = phy_config_aneg(phydev);
	if (ret)
		return ret;

	return sync ? phy_poll_aneg_done(phydev) : 0;
}
EXPORT_SYMBOL_GPL(phy_speed_down);

/**
 * phy_speed_up - (re)set advertised speeds to all supported speeds
 * @phydev: the phy_device struct
 *
 * Description: Used to revert the effect of phy_speed_down
 */
int phy_speed_up(struct phy_device *phydev)
{
	u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
	u32 adv_old = phydev->advertising;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return 0;

	phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);

	if (phydev->advertising == adv_old)
		return 0;

	return phy_config_aneg(phydev);
}
EXPORT_SYMBOL_GPL(phy_speed_up);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc. This function starts the delayed workqueue which tracks
 *   the state of the PHY. If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
EXPORT_SYMBOL_GPL(phy_start_machine);

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: There has been a change in state which requires that the
 *   state machine runs.
 */
void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&phydev->state_queue);
	else
		cancel_delayed_work(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine delayed workqueue, sets the
 *   state to UP (unless it wasn't up yet). This function must be
 *   called BEFORE phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}
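
/*
 * Illustrative sketch (not part of the original file): phy_speed_down() and
 * phy_speed_up() above are meant to bracket a low-power period, e.g. while
 * waiting for a WoL packet. A MAC driver's suspend/resume path might use
 * them roughly as follows (foo_suspend/foo_resume and wol_enabled are
 * hypothetical):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(dev);
 *
 *		if (wol_enabled)
 *			phy_speed_down(ndev->phydev, true);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(dev);
 *
 *		if (wol_enabled)
 *			phy_speed_up(ndev->phydev);
 *		return 0;
 *	}
 */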

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, false);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		return err;

	/* Clear the interrupt */
	return phy_clear_interrupt(phydev);
}

/**
 * phy_change - Called by the phy_interrupt to handle PHY changes
 * @phydev: phy_device struct that interrupted
 */
static irqreturn_t phy_change(struct phy_device *phydev)
{
	if (phy_interrupt_is_valid(phydev)) {
		if (phydev->drv->did_interrupt &&
		    !phydev->drv->did_interrupt(phydev))
			return IRQ_NONE;

		if (phydev->state == PHY_HALTED)
			if (phy_disable_interrupts(phydev))
				goto phy_err;
	}

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	/* reschedule state queue work to run as soon as possible */
	phy_trigger_machine(phydev, true);

	if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
		goto phy_err;
	return IRQ_HANDLED;

phy_err:
	phy_error(phydev);
	return IRQ_NONE;
}

/**
 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change_work(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	phy_change(phydev);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and uses phy_change to handle the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours. */

	return phy_change(phydev);
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
				 IRQF_ONESHOT | IRQF_SHARED,
				 phydev_name(phydev), phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->mdio.bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev))
		phy_disable_interrupts(phydev);

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);
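
/*
 * Illustrative sketch (not part of the original file): phy_start() and
 * phy_stop() are normally called from a MAC driver's ndo_open/ndo_stop paths,
 * once the PHY has been attached. Roughly, with foo_adjust_link and the
 * bus_id string being hypothetical:
 *
 *	static int foo_open(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev;
 *
 *		phydev = phy_connect(ndev, "mdio-bus:00", foo_adjust_link,
 *				     PHY_INTERFACE_MODE_RGMII);
 *		if (IS_ERR(phydev))
 *			return PTR_ERR(phydev);
 *
 *		phy_start(phydev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *ndev)
 *	{
 *		phy_stop(ndev->phydev);
 *		phy_disconnect(ndev->phydev);
 *		return 0;
 *	}
 */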

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work. Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* if phy was suspended, bring the physical link up again */
		__phy_resume(phydev);

		/* make sure interrupts are re-enabled for the PHY */
		if (phy_interrupt_is_valid(phydev)) {
			err = phy_enable_interrupts(phydev);
			if (err < 0)
				break;
		}

		phydev->state = PHY_RESUMING;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);

static void phy_link_up(struct phy_device *phydev)
{
	phydev->phy_link_change(phydev, true, true);
	phy_led_trigger_change_speed(phydev);
}

static void phy_link_down(struct phy_device *phydev, bool do_carrier)
{
	phydev->phy_link_change(phydev, false, do_carrier);
	phy_led_trigger_change_speed(phydev);
}

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv && phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			phy_link_down(phydev, true);
			break;
		}

		/* Check if negotiation is done. Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			phy_link_up(phydev);
		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (!phy_polling_mode(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			phy_link_up(phydev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			phy_link_up(phydev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
			phy_link_down(phydev, false);
		}
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling and the link
		 * changed since the last check.
		 */
		if (phy_polling_mode(phydev)) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/*
		 * Failsafe: check that nobody set phydev->link=0 between two
		 * poll cycles, otherwise we won't leave RUNNING state as long
		 * as link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			phydev_err(phydev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			phy_link_up(phydev);
		} else {
			phydev->state = PHY_NOLINK;
			phy_link_down(phydev, true);
		}
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			phy_link_down(phydev, true);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					phy_link_up(phydev);
				} else {
					phydev->state = PHY_NOLINK;
					phy_link_down(phydev, false);
				}
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				phy_link_up(phydev);
			} else {
				phydev->state = PHY_NOLINK;
				phy_link_down(phydev, false);
			}
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg_priv(phydev, false);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	if (old_state != phydev->state)
		phydev_dbg(phydev, "PHY state change %s -> %s\n",
			   phy_state_to_str(old_state),
			   phy_state_to_str(phydev->state));

	/* Only re-schedule a PHY state machine change if we are polling the
	 * PHY. If PHY_IGNORE_INTERRUPT is set, then we will be moving
	 * between states from phy_mac_interrupt().
	 */
	if (phy_polling_mode(phydev))
		queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
				   PHY_STATE_TIME * HZ);
}

/**
 * phy_mac_interrupt - MAC says the link has changed
 * @phydev: phy_device struct with changed link
 *
 * The MAC layer is able to indicate there has been a change in the PHY link
 * status. Trigger the state machine by scheduling its work queue.
 */
void phy_mac_interrupt(struct phy_device *phydev)
{
	/* Trigger a state machine change */
	queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);
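
/*
 * Illustrative sketch (not part of the original file): a MAC that learns of
 * PHY link changes through its own interrupt (phydev->irq set to
 * PHY_IGNORE_INTERRUPT) can notify the state machine from its ISR via
 * phy_mac_interrupt(). Roughly, with the foo_* names and status bit being
 * hypothetical:
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct net_device *ndev = dev_id;
 *		u32 status = foo_read_irq_status(ndev);
 *
 *		if (status & FOO_IRQ_LINK)
 *			phy_mac_interrupt(ndev->phydev);
 *
 *		return IRQ_HANDLED;
 *	}
 */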

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	if (!phydev->drv)
		return -EIO;

	/* According to 802.3az, EEE is supported only in full-duplex mode. */
	if (phydev->duplex == DUPLEX_FULL) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);

			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: reports the number of times the PHY failed to complete
 * its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	if (!phydev->drv)
		return -EIO;

	return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	if (!phydev->drv)
		return -EIO;

	/* Get Supported EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);
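
/*
 * Illustrative sketch (not part of the original file): a MAC driver with
 * hardware LPI support would typically call phy_init_eee() from its link-up
 * path to decide whether to enable its own EEE logic, and forward the
 * ethtool --show-eee request to phy_ethtool_get_eee(). Roughly (priv,
 * eee_active and foo_get_eee are hypothetical):
 *
 *	// in the adjust_link/link-up handler
 *	priv->eee_active = phy_init_eee(ndev->phydev, true) >= 0;
 *
 *	static int foo_get_eee(struct net_device *ndev, struct ethtool_eee *e)
 *	{
 *		return phy_ethtool_get_eee(ndev->phydev, e);
 *	}
 */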

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: programs the EEE advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int cap, old_adv, adv, ret;

	if (!phydev->drv)
		return -EIO;

	/* Get Supported EEE */
	cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (cap < 0)
		return cap;

	old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (old_adv < 0)
		return old_adv;

	adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;

	/* Mask prohibited EEE modes */
	adv &= ~phydev->eee_broken_modes;

	if (old_adv != adv) {
		ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
		if (ret < 0)
			return ret;

		/* Restart autonegotiation so the new modes get sent to the
		 * link partner.
		 */
		ret = phy_restart_aneg(phydev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv && phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv && phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(phydev, cmd);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);

int phy_ethtool_nway_reset(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	if (!phydev->drv)
		return -EIO;

	return phy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
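
/*
 * Illustrative sketch (not part of the original file): the net_device-level
 * helpers above are designed to be plugged directly into a MAC driver's
 * ethtool_ops. A typical hookup, with foo_ethtool_ops being hypothetical,
 * looks roughly like:
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *		.nway_reset		= phy_ethtool_nway_reset,
 *	};
 *
 *	// in the probe path:
 *	//	ndev->ethtool_ops = &foo_ethtool_ops;
 */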