1 /* 2 * drivers/net/phy/phy.c 3 * 4 * Framework for configuring and reading PHY devices 5 * Based on code in sungem_phy.c and gianfar_phy.c 6 * 7 * Author: Andy Fleming 8 * 9 * Copyright (c) 2004 Freescale Semiconductor, Inc. 10 * Copyright (c) 2006, 2007 Maciej W. Rozycki 11 * 12 * This program is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU General Public License as published by the 14 * Free Software Foundation; either version 2 of the License, or (at your 15 * option) any later version. 16 * 17 */ 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/unistd.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/init.h> 25 #include <linux/delay.h> 26 #include <linux/netdevice.h> 27 #include <linux/etherdevice.h> 28 #include <linux/skbuff.h> 29 #include <linux/spinlock.h> 30 #include <linux/mm.h> 31 #include <linux/module.h> 32 #include <linux/mii.h> 33 #include <linux/ethtool.h> 34 #include <linux/phy.h> 35 #include <linux/timer.h> 36 #include <linux/workqueue.h> 37 38 #include <asm/atomic.h> 39 #include <asm/io.h> 40 #include <asm/irq.h> 41 #include <asm/uaccess.h> 42 43 /** 44 * phy_print_status - Convenience function to print out the current phy status 45 * @phydev: the phy_device struct 46 */ 47 void phy_print_status(struct phy_device *phydev) 48 { 49 pr_info("PHY: %s - Link is %s", phydev->dev.bus_id, 50 phydev->link ? "Up" : "Down"); 51 if (phydev->link) 52 printk(" - %d/%s", phydev->speed, 53 DUPLEX_FULL == phydev->duplex ? 54 "Full" : "Half"); 55 56 printk("\n"); 57 } 58 EXPORT_SYMBOL(phy_print_status); 59 60 61 /** 62 * phy_read - Convenience function for reading a given PHY register 63 * @phydev: the phy_device struct 64 * @regnum: register number to read 65 * 66 * NOTE: MUST NOT be called from interrupt context, 67 * because the bus read/write functions may wait for an interrupt 68 * to conclude the operation. 69 */ 70 int phy_read(struct phy_device *phydev, u16 regnum) 71 { 72 int retval; 73 struct mii_bus *bus = phydev->bus; 74 75 spin_lock_bh(&bus->mdio_lock); 76 retval = bus->read(bus, phydev->addr, regnum); 77 spin_unlock_bh(&bus->mdio_lock); 78 79 return retval; 80 } 81 EXPORT_SYMBOL(phy_read); 82 83 /** 84 * phy_write - Convenience function for writing a given PHY register 85 * @phydev: the phy_device struct 86 * @regnum: register number to write 87 * @val: value to write to @regnum 88 * 89 * NOTE: MUST NOT be called from interrupt context, 90 * because the bus read/write functions may wait for an interrupt 91 * to conclude the operation. 92 */ 93 int phy_write(struct phy_device *phydev, u16 regnum, u16 val) 94 { 95 int err; 96 struct mii_bus *bus = phydev->bus; 97 98 spin_lock_bh(&bus->mdio_lock); 99 err = bus->write(bus, phydev->addr, regnum, val); 100 spin_unlock_bh(&bus->mdio_lock); 101 102 return err; 103 } 104 EXPORT_SYMBOL(phy_write); 105 106 /** 107 * phy_clear_interrupt - Ack the phy device's interrupt 108 * @phydev: the phy_device struct 109 * 110 * If the @phydev driver has an ack_interrupt function, call it to 111 * ack and clear the phy device's interrupt. 112 * 113 * Returns 0 on success on < 0 on error. 114 */ 115 int phy_clear_interrupt(struct phy_device *phydev) 116 { 117 int err = 0; 118 119 if (phydev->drv->ack_interrupt) 120 err = phydev->drv->ack_interrupt(phydev); 121 122 return err; 123 } 124 125 /** 126 * phy_config_interrupt - configure the PHY device for the requested interrupts 127 * @phydev: the phy_device struct 128 * @interrupts: interrupt flags to configure for this @phydev 129 * 130 * Returns 0 on success on < 0 on error. 131 */ 132 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) 133 { 134 int err = 0; 135 136 phydev->interrupts = interrupts; 137 if (phydev->drv->config_intr) 138 err = phydev->drv->config_intr(phydev); 139 140 return err; 141 } 142 143 144 /** 145 * phy_aneg_done - return auto-negotiation status 146 * @phydev: target phy_device struct 147 * 148 * Description: Reads the status register and returns 0 either if 149 * auto-negotiation is incomplete, or if there was an error. 150 * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done. 151 */ 152 static inline int phy_aneg_done(struct phy_device *phydev) 153 { 154 int retval; 155 156 retval = phy_read(phydev, MII_BMSR); 157 158 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE); 159 } 160 161 /* A structure for mapping a particular speed and duplex 162 * combination to a particular SUPPORTED and ADVERTISED value */ 163 struct phy_setting { 164 int speed; 165 int duplex; 166 u32 setting; 167 }; 168 169 /* A mapping of all SUPPORTED settings to speed/duplex */ 170 static const struct phy_setting settings[] = { 171 { 172 .speed = 10000, 173 .duplex = DUPLEX_FULL, 174 .setting = SUPPORTED_10000baseT_Full, 175 }, 176 { 177 .speed = SPEED_1000, 178 .duplex = DUPLEX_FULL, 179 .setting = SUPPORTED_1000baseT_Full, 180 }, 181 { 182 .speed = SPEED_1000, 183 .duplex = DUPLEX_HALF, 184 .setting = SUPPORTED_1000baseT_Half, 185 }, 186 { 187 .speed = SPEED_100, 188 .duplex = DUPLEX_FULL, 189 .setting = SUPPORTED_100baseT_Full, 190 }, 191 { 192 .speed = SPEED_100, 193 .duplex = DUPLEX_HALF, 194 .setting = SUPPORTED_100baseT_Half, 195 }, 196 { 197 .speed = SPEED_10, 198 .duplex = DUPLEX_FULL, 199 .setting = SUPPORTED_10baseT_Full, 200 }, 201 { 202 .speed = SPEED_10, 203 .duplex = DUPLEX_HALF, 204 .setting = SUPPORTED_10baseT_Half, 205 }, 206 }; 207 208 #define MAX_NUM_SETTINGS ARRAY_SIZE(settings) 209 210 /** 211 * phy_find_setting - find a PHY settings array entry that matches speed & duplex 212 * @speed: speed to match 213 * @duplex: duplex to match 214 * 215 * Description: Searches the settings array for the setting which 216 * matches the desired speed and duplex, and returns the index 217 * of that setting. Returns the index of the last setting if 218 * none of the others match. 219 */ 220 static inline int phy_find_setting(int speed, int duplex) 221 { 222 int idx = 0; 223 224 while (idx < ARRAY_SIZE(settings) && 225 (settings[idx].speed != speed || 226 settings[idx].duplex != duplex)) 227 idx++; 228 229 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1; 230 } 231 232 /** 233 * phy_find_valid - find a PHY setting that matches the requested features mask 234 * @idx: The first index in settings[] to search 235 * @features: A mask of the valid settings 236 * 237 * Description: Returns the index of the first valid setting less 238 * than or equal to the one pointed to by idx, as determined by 239 * the mask in features. Returns the index of the last setting 240 * if nothing else matches. 241 */ 242 static inline int phy_find_valid(int idx, u32 features) 243 { 244 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) 245 idx++; 246 247 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1; 248 } 249 250 /** 251 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex 252 * @phydev: the target phy_device struct 253 * 254 * Description: Make sure the PHY is set to supported speeds and 255 * duplexes. Drop down by one in this order: 1000/FULL, 256 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF. 257 */ 258 void phy_sanitize_settings(struct phy_device *phydev) 259 { 260 u32 features = phydev->supported; 261 int idx; 262 263 /* Sanitize settings based on PHY capabilities */ 264 if ((features & SUPPORTED_Autoneg) == 0) 265 phydev->autoneg = AUTONEG_DISABLE; 266 267 idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex), 268 features); 269 270 phydev->speed = settings[idx].speed; 271 phydev->duplex = settings[idx].duplex; 272 } 273 EXPORT_SYMBOL(phy_sanitize_settings); 274 275 /** 276 * phy_ethtool_sset - generic ethtool sset function, handles all the details 277 * @phydev: target phy_device struct 278 * @cmd: ethtool_cmd 279 * 280 * A few notes about parameter checking: 281 * - We don't set port or transceiver, so we don't care what they 282 * were set to. 283 * - phy_start_aneg() will make sure forced settings are sane, and 284 * choose the next best ones from the ones selected, so we don't 285 * care if ethtool tries to give us bad values. 286 */ 287 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) 288 { 289 if (cmd->phy_address != phydev->addr) 290 return -EINVAL; 291 292 /* We make sure that we don't pass unsupported 293 * values in to the PHY */ 294 cmd->advertising &= phydev->supported; 295 296 /* Verify the settings we care about. */ 297 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) 298 return -EINVAL; 299 300 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) 301 return -EINVAL; 302 303 if (cmd->autoneg == AUTONEG_DISABLE 304 && ((cmd->speed != SPEED_1000 305 && cmd->speed != SPEED_100 306 && cmd->speed != SPEED_10) 307 || (cmd->duplex != DUPLEX_HALF 308 && cmd->duplex != DUPLEX_FULL))) 309 return -EINVAL; 310 311 phydev->autoneg = cmd->autoneg; 312 313 phydev->speed = cmd->speed; 314 315 phydev->advertising = cmd->advertising; 316 317 if (AUTONEG_ENABLE == cmd->autoneg) 318 phydev->advertising |= ADVERTISED_Autoneg; 319 else 320 phydev->advertising &= ~ADVERTISED_Autoneg; 321 322 phydev->duplex = cmd->duplex; 323 324 /* Restart the PHY */ 325 phy_start_aneg(phydev); 326 327 return 0; 328 } 329 EXPORT_SYMBOL(phy_ethtool_sset); 330 331 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) 332 { 333 cmd->supported = phydev->supported; 334 335 cmd->advertising = phydev->advertising; 336 337 cmd->speed = phydev->speed; 338 cmd->duplex = phydev->duplex; 339 cmd->port = PORT_MII; 340 cmd->phy_address = phydev->addr; 341 cmd->transceiver = XCVR_EXTERNAL; 342 cmd->autoneg = phydev->autoneg; 343 344 return 0; 345 } 346 EXPORT_SYMBOL(phy_ethtool_gset); 347 348 /** 349 * phy_mii_ioctl - generic PHY MII ioctl interface 350 * @phydev: the phy_device struct 351 * @mii_data: MII ioctl data 352 * @cmd: ioctl cmd to execute 353 * 354 * Note that this function is currently incompatible with the 355 * PHYCONTROL layer. It changes registers without regard to 356 * current state. Use at own risk. 357 */ 358 int phy_mii_ioctl(struct phy_device *phydev, 359 struct mii_ioctl_data *mii_data, int cmd) 360 { 361 u16 val = mii_data->val_in; 362 363 switch (cmd) { 364 case SIOCGMIIPHY: 365 mii_data->phy_id = phydev->addr; 366 break; 367 case SIOCGMIIREG: 368 mii_data->val_out = phy_read(phydev, mii_data->reg_num); 369 break; 370 371 case SIOCSMIIREG: 372 if (!capable(CAP_NET_ADMIN)) 373 return -EPERM; 374 375 if (mii_data->phy_id == phydev->addr) { 376 switch(mii_data->reg_num) { 377 case MII_BMCR: 378 if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0) 379 phydev->autoneg = AUTONEG_DISABLE; 380 else 381 phydev->autoneg = AUTONEG_ENABLE; 382 if ((!phydev->autoneg) && (val & BMCR_FULLDPLX)) 383 phydev->duplex = DUPLEX_FULL; 384 else 385 phydev->duplex = DUPLEX_HALF; 386 if ((!phydev->autoneg) && 387 (val & BMCR_SPEED1000)) 388 phydev->speed = SPEED_1000; 389 else if ((!phydev->autoneg) && 390 (val & BMCR_SPEED100)) 391 phydev->speed = SPEED_100; 392 break; 393 case MII_ADVERTISE: 394 phydev->advertising = val; 395 break; 396 default: 397 /* do nothing */ 398 break; 399 } 400 } 401 402 phy_write(phydev, mii_data->reg_num, val); 403 404 if (mii_data->reg_num == MII_BMCR 405 && val & BMCR_RESET 406 && phydev->drv->config_init) 407 phydev->drv->config_init(phydev); 408 break; 409 410 default: 411 return -ENOTTY; 412 } 413 414 return 0; 415 } 416 EXPORT_SYMBOL(phy_mii_ioctl); 417 418 /** 419 * phy_start_aneg - start auto-negotiation for this PHY device 420 * @phydev: the phy_device struct 421 * 422 * Description: Sanitizes the settings (if we're not autonegotiating 423 * them), and then calls the driver's config_aneg function. 424 * If the PHYCONTROL Layer is operating, we change the state to 425 * reflect the beginning of Auto-negotiation or forcing. 426 */ 427 int phy_start_aneg(struct phy_device *phydev) 428 { 429 int err; 430 431 spin_lock_bh(&phydev->lock); 432 433 if (AUTONEG_DISABLE == phydev->autoneg) 434 phy_sanitize_settings(phydev); 435 436 err = phydev->drv->config_aneg(phydev); 437 438 if (err < 0) 439 goto out_unlock; 440 441 if (phydev->state != PHY_HALTED) { 442 if (AUTONEG_ENABLE == phydev->autoneg) { 443 phydev->state = PHY_AN; 444 phydev->link_timeout = PHY_AN_TIMEOUT; 445 } else { 446 phydev->state = PHY_FORCING; 447 phydev->link_timeout = PHY_FORCE_TIMEOUT; 448 } 449 } 450 451 out_unlock: 452 spin_unlock_bh(&phydev->lock); 453 return err; 454 } 455 EXPORT_SYMBOL(phy_start_aneg); 456 457 458 static void phy_change(struct work_struct *work); 459 static void phy_timer(unsigned long data); 460 461 /** 462 * phy_start_machine - start PHY state machine tracking 463 * @phydev: the phy_device struct 464 * @handler: callback function for state change notifications 465 * 466 * Description: The PHY infrastructure can run a state machine 467 * which tracks whether the PHY is starting up, negotiating, 468 * etc. This function starts the timer which tracks the state 469 * of the PHY. If you want to be notified when the state changes, 470 * pass in the callback @handler, otherwise, pass NULL. If you 471 * want to maintain your own state machine, do not call this 472 * function. 473 */ 474 void phy_start_machine(struct phy_device *phydev, 475 void (*handler)(struct net_device *)) 476 { 477 phydev->adjust_state = handler; 478 479 init_timer(&phydev->phy_timer); 480 phydev->phy_timer.function = &phy_timer; 481 phydev->phy_timer.data = (unsigned long) phydev; 482 mod_timer(&phydev->phy_timer, jiffies + HZ); 483 } 484 485 /** 486 * phy_stop_machine - stop the PHY state machine tracking 487 * @phydev: target phy_device struct 488 * 489 * Description: Stops the state machine timer, sets the state to UP 490 * (unless it wasn't up yet). This function must be called BEFORE 491 * phy_detach. 492 */ 493 void phy_stop_machine(struct phy_device *phydev) 494 { 495 del_timer_sync(&phydev->phy_timer); 496 497 spin_lock_bh(&phydev->lock); 498 if (phydev->state > PHY_UP) 499 phydev->state = PHY_UP; 500 spin_unlock_bh(&phydev->lock); 501 502 phydev->adjust_state = NULL; 503 } 504 505 /** 506 * phy_force_reduction - reduce PHY speed/duplex settings by one step 507 * @phydev: target phy_device struct 508 * 509 * Description: Reduces the speed/duplex settings by one notch, 510 * in this order-- 511 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF. 512 * The function bottoms out at 10/HALF. 513 */ 514 static void phy_force_reduction(struct phy_device *phydev) 515 { 516 int idx; 517 518 idx = phy_find_setting(phydev->speed, phydev->duplex); 519 520 idx++; 521 522 idx = phy_find_valid(idx, phydev->supported); 523 524 phydev->speed = settings[idx].speed; 525 phydev->duplex = settings[idx].duplex; 526 527 pr_info("Trying %d/%s\n", phydev->speed, 528 DUPLEX_FULL == phydev->duplex ? 529 "FULL" : "HALF"); 530 } 531 532 533 /** 534 * phy_error - enter HALTED state for this PHY device 535 * @phydev: target phy_device struct 536 * 537 * Moves the PHY to the HALTED state in response to a read 538 * or write error, and tells the controller the link is down. 539 * Must not be called from interrupt context, or while the 540 * phydev->lock is held. 541 */ 542 void phy_error(struct phy_device *phydev) 543 { 544 spin_lock_bh(&phydev->lock); 545 phydev->state = PHY_HALTED; 546 spin_unlock_bh(&phydev->lock); 547 } 548 549 /** 550 * phy_interrupt - PHY interrupt handler 551 * @irq: interrupt line 552 * @phy_dat: phy_device pointer 553 * 554 * Description: When a PHY interrupt occurs, the handler disables 555 * interrupts, and schedules a work task to clear the interrupt. 556 */ 557 static irqreturn_t phy_interrupt(int irq, void *phy_dat) 558 { 559 struct phy_device *phydev = phy_dat; 560 561 if (PHY_HALTED == phydev->state) 562 return IRQ_NONE; /* It can't be ours. */ 563 564 /* The MDIO bus is not allowed to be written in interrupt 565 * context, so we need to disable the irq here. A work 566 * queue will write the PHY to disable and clear the 567 * interrupt, and then reenable the irq line. */ 568 disable_irq_nosync(irq); 569 atomic_inc(&phydev->irq_disable); 570 571 schedule_work(&phydev->phy_queue); 572 573 return IRQ_HANDLED; 574 } 575 576 /** 577 * phy_enable_interrupts - Enable the interrupts from the PHY side 578 * @phydev: target phy_device struct 579 */ 580 int phy_enable_interrupts(struct phy_device *phydev) 581 { 582 int err; 583 584 err = phy_clear_interrupt(phydev); 585 586 if (err < 0) 587 return err; 588 589 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); 590 591 return err; 592 } 593 EXPORT_SYMBOL(phy_enable_interrupts); 594 595 /** 596 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side 597 * @phydev: target phy_device struct 598 */ 599 int phy_disable_interrupts(struct phy_device *phydev) 600 { 601 int err; 602 603 /* Disable PHY interrupts */ 604 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); 605 606 if (err) 607 goto phy_err; 608 609 /* Clear the interrupt */ 610 err = phy_clear_interrupt(phydev); 611 612 if (err) 613 goto phy_err; 614 615 return 0; 616 617 phy_err: 618 phy_error(phydev); 619 620 return err; 621 } 622 EXPORT_SYMBOL(phy_disable_interrupts); 623 624 /** 625 * phy_start_interrupts - request and enable interrupts for a PHY device 626 * @phydev: target phy_device struct 627 * 628 * Description: Request the interrupt for the given PHY. 629 * If this fails, then we set irq to PHY_POLL. 630 * Otherwise, we enable the interrupts in the PHY. 631 * This should only be called with a valid IRQ number. 632 * Returns 0 on success or < 0 on error. 633 */ 634 int phy_start_interrupts(struct phy_device *phydev) 635 { 636 int err = 0; 637 638 INIT_WORK(&phydev->phy_queue, phy_change); 639 640 atomic_set(&phydev->irq_disable, 0); 641 if (request_irq(phydev->irq, phy_interrupt, 642 IRQF_SHARED, 643 "phy_interrupt", 644 phydev) < 0) { 645 printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n", 646 phydev->bus->name, 647 phydev->irq); 648 phydev->irq = PHY_POLL; 649 return 0; 650 } 651 652 err = phy_enable_interrupts(phydev); 653 654 return err; 655 } 656 EXPORT_SYMBOL(phy_start_interrupts); 657 658 /** 659 * phy_stop_interrupts - disable interrupts from a PHY device 660 * @phydev: target phy_device struct 661 */ 662 int phy_stop_interrupts(struct phy_device *phydev) 663 { 664 int err; 665 666 err = phy_disable_interrupts(phydev); 667 668 if (err) 669 phy_error(phydev); 670 671 free_irq(phydev->irq, phydev); 672 673 /* 674 * Cannot call flush_scheduled_work() here as desired because 675 * of rtnl_lock(), but we do not really care about what would 676 * be done, except from enable_irq(), so cancel any work 677 * possibly pending and take care of the matter below. 678 */ 679 cancel_work_sync(&phydev->phy_queue); 680 /* 681 * If work indeed has been cancelled, disable_irq() will have 682 * been left unbalanced from phy_interrupt() and enable_irq() 683 * has to be called so that other devices on the line work. 684 */ 685 while (atomic_dec_return(&phydev->irq_disable) >= 0) 686 enable_irq(phydev->irq); 687 688 return err; 689 } 690 EXPORT_SYMBOL(phy_stop_interrupts); 691 692 693 /** 694 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes 695 * @work: work_struct that describes the work to be done 696 */ 697 static void phy_change(struct work_struct *work) 698 { 699 int err; 700 struct phy_device *phydev = 701 container_of(work, struct phy_device, phy_queue); 702 703 err = phy_disable_interrupts(phydev); 704 705 if (err) 706 goto phy_err; 707 708 spin_lock_bh(&phydev->lock); 709 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) 710 phydev->state = PHY_CHANGELINK; 711 spin_unlock_bh(&phydev->lock); 712 713 atomic_dec(&phydev->irq_disable); 714 enable_irq(phydev->irq); 715 716 /* Reenable interrupts */ 717 if (PHY_HALTED != phydev->state) 718 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); 719 720 if (err) 721 goto irq_enable_err; 722 723 return; 724 725 irq_enable_err: 726 disable_irq(phydev->irq); 727 atomic_inc(&phydev->irq_disable); 728 phy_err: 729 phy_error(phydev); 730 } 731 732 /** 733 * phy_stop - Bring down the PHY link, and stop checking the status 734 * @phydev: target phy_device struct 735 */ 736 void phy_stop(struct phy_device *phydev) 737 { 738 spin_lock_bh(&phydev->lock); 739 740 if (PHY_HALTED == phydev->state) 741 goto out_unlock; 742 743 if (phydev->irq != PHY_POLL) { 744 /* Disable PHY Interrupts */ 745 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); 746 747 /* Clear any pending interrupts */ 748 phy_clear_interrupt(phydev); 749 } 750 751 phydev->state = PHY_HALTED; 752 753 out_unlock: 754 spin_unlock_bh(&phydev->lock); 755 756 /* 757 * Cannot call flush_scheduled_work() here as desired because 758 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() 759 * will not reenable interrupts. 760 */ 761 } 762 763 764 /** 765 * phy_start - start or restart a PHY device 766 * @phydev: target phy_device struct 767 * 768 * Description: Indicates the attached device's readiness to 769 * handle PHY-related work. Used during startup to start the 770 * PHY, and after a call to phy_stop() to resume operation. 771 * Also used to indicate the MDIO bus has cleared an error 772 * condition. 773 */ 774 void phy_start(struct phy_device *phydev) 775 { 776 spin_lock_bh(&phydev->lock); 777 778 switch (phydev->state) { 779 case PHY_STARTING: 780 phydev->state = PHY_PENDING; 781 break; 782 case PHY_READY: 783 phydev->state = PHY_UP; 784 break; 785 case PHY_HALTED: 786 phydev->state = PHY_RESUMING; 787 default: 788 break; 789 } 790 spin_unlock_bh(&phydev->lock); 791 } 792 EXPORT_SYMBOL(phy_stop); 793 EXPORT_SYMBOL(phy_start); 794 795 /* PHY timer which handles the state machine */ 796 static void phy_timer(unsigned long data) 797 { 798 struct phy_device *phydev = (struct phy_device *)data; 799 int needs_aneg = 0; 800 int err = 0; 801 802 spin_lock_bh(&phydev->lock); 803 804 if (phydev->adjust_state) 805 phydev->adjust_state(phydev->attached_dev); 806 807 switch(phydev->state) { 808 case PHY_DOWN: 809 case PHY_STARTING: 810 case PHY_READY: 811 case PHY_PENDING: 812 break; 813 case PHY_UP: 814 needs_aneg = 1; 815 816 phydev->link_timeout = PHY_AN_TIMEOUT; 817 818 break; 819 case PHY_AN: 820 err = phy_read_status(phydev); 821 822 if (err < 0) 823 break; 824 825 /* If the link is down, give up on 826 * negotiation for now */ 827 if (!phydev->link) { 828 phydev->state = PHY_NOLINK; 829 netif_carrier_off(phydev->attached_dev); 830 phydev->adjust_link(phydev->attached_dev); 831 break; 832 } 833 834 /* Check if negotiation is done. Break 835 * if there's an error */ 836 err = phy_aneg_done(phydev); 837 if (err < 0) 838 break; 839 840 /* If AN is done, we're running */ 841 if (err > 0) { 842 phydev->state = PHY_RUNNING; 843 netif_carrier_on(phydev->attached_dev); 844 phydev->adjust_link(phydev->attached_dev); 845 846 } else if (0 == phydev->link_timeout--) { 847 int idx; 848 849 needs_aneg = 1; 850 /* If we have the magic_aneg bit, 851 * we try again */ 852 if (phydev->drv->flags & PHY_HAS_MAGICANEG) 853 break; 854 855 /* The timer expired, and we still 856 * don't have a setting, so we try 857 * forcing it until we find one that 858 * works, starting from the fastest speed, 859 * and working our way down */ 860 idx = phy_find_valid(0, phydev->supported); 861 862 phydev->speed = settings[idx].speed; 863 phydev->duplex = settings[idx].duplex; 864 865 phydev->autoneg = AUTONEG_DISABLE; 866 867 pr_info("Trying %d/%s\n", phydev->speed, 868 DUPLEX_FULL == 869 phydev->duplex ? 870 "FULL" : "HALF"); 871 } 872 break; 873 case PHY_NOLINK: 874 err = phy_read_status(phydev); 875 876 if (err) 877 break; 878 879 if (phydev->link) { 880 phydev->state = PHY_RUNNING; 881 netif_carrier_on(phydev->attached_dev); 882 phydev->adjust_link(phydev->attached_dev); 883 } 884 break; 885 case PHY_FORCING: 886 err = genphy_update_link(phydev); 887 888 if (err) 889 break; 890 891 if (phydev->link) { 892 phydev->state = PHY_RUNNING; 893 netif_carrier_on(phydev->attached_dev); 894 } else { 895 if (0 == phydev->link_timeout--) { 896 phy_force_reduction(phydev); 897 needs_aneg = 1; 898 } 899 } 900 901 phydev->adjust_link(phydev->attached_dev); 902 break; 903 case PHY_RUNNING: 904 /* Only register a CHANGE if we are 905 * polling */ 906 if (PHY_POLL == phydev->irq) 907 phydev->state = PHY_CHANGELINK; 908 break; 909 case PHY_CHANGELINK: 910 err = phy_read_status(phydev); 911 912 if (err) 913 break; 914 915 if (phydev->link) { 916 phydev->state = PHY_RUNNING; 917 netif_carrier_on(phydev->attached_dev); 918 } else { 919 phydev->state = PHY_NOLINK; 920 netif_carrier_off(phydev->attached_dev); 921 } 922 923 phydev->adjust_link(phydev->attached_dev); 924 925 if (PHY_POLL != phydev->irq) 926 err = phy_config_interrupt(phydev, 927 PHY_INTERRUPT_ENABLED); 928 break; 929 case PHY_HALTED: 930 if (phydev->link) { 931 phydev->link = 0; 932 netif_carrier_off(phydev->attached_dev); 933 phydev->adjust_link(phydev->attached_dev); 934 } 935 break; 936 case PHY_RESUMING: 937 938 err = phy_clear_interrupt(phydev); 939 940 if (err) 941 break; 942 943 err = phy_config_interrupt(phydev, 944 PHY_INTERRUPT_ENABLED); 945 946 if (err) 947 break; 948 949 if (AUTONEG_ENABLE == phydev->autoneg) { 950 err = phy_aneg_done(phydev); 951 if (err < 0) 952 break; 953 954 /* err > 0 if AN is done. 955 * Otherwise, it's 0, and we're 956 * still waiting for AN */ 957 if (err > 0) { 958 phydev->state = PHY_RUNNING; 959 } else { 960 phydev->state = PHY_AN; 961 phydev->link_timeout = PHY_AN_TIMEOUT; 962 } 963 } else 964 phydev->state = PHY_RUNNING; 965 break; 966 } 967 968 spin_unlock_bh(&phydev->lock); 969 970 if (needs_aneg) 971 err = phy_start_aneg(phydev); 972 973 if (err < 0) 974 phy_error(phydev); 975 976 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); 977 } 978 979