/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
			phydev->link ? "Up" : "Down");
	if (phydev->link)
		printk(" - %d/%s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ?
				"Full" : "Half");

	printk("\n");
}
EXPORT_SYMBOL(phy_print_status);


/**
 * phy_read - Convenience function for reading a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to read
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_read(struct phy_device *phydev, u16 regnum)
{
	int retval;
	struct mii_bus *bus = phydev->bus;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	retval = bus->read(bus, phydev->addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return retval;
}
EXPORT_SYMBOL(phy_read);

/**
 * phy_write - Convenience function for writing a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to write
 * @val: value to write to @regnum
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	int err;
	struct mii_bus *bus = phydev->bus;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	err = bus->write(bus, phydev->addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return err;
}
EXPORT_SYMBOL(phy_write);
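
/*
 * Example (illustrative sketch, not part of this file): a MAC driver that
 * already holds a phy_device pointer could use the accessors above to read
 * the basic status register and request a software reset.  "priv->phydev"
 * is a hypothetical driver-private pointer.
 *
 *	int bmsr, err;
 *
 *	bmsr = phy_read(priv->phydev, MII_BMSR);
 *	if (bmsr < 0)
 *		return bmsr;
 *
 *	err = phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
 *
 * Both helpers may sleep on the MDIO bus mutex, so they must only be
 * called from process context.
 */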

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 if
 *   auto-negotiation is still incomplete, a negative error code if
 *   the register read failed, or BMSR_ANEGCOMPLETE if
 *   auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
			(settings[idx].speed != speed ||
			settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}
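
/*
 * Worked example (illustrative, based on the settings[] table above):
 * phy_find_setting(SPEED_100, DUPLEX_FULL) returns index 3.  If the PHY
 * only supports 10baseT (features == SUPPORTED_10baseT_Full |
 * SUPPORTED_10baseT_Half), phy_find_valid(3, features) skips the
 * unsupported 100baseT entries and returns index 5, i.e. 10/FULL.
 * If nothing in the mask matches, both helpers fall back to the last
 * entry, 10/HALF.
 */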

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  If the current setting is not supported, drop down
 *   to the next supported one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);
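
/*
 * Example (illustrative): with phydev->supported limited to the 10/100
 * SUPPORTED_* bits, a request for 1000/FULL is sanitized down to
 * 100/FULL, while a pair the PHY does support is left untouched.
 * Autonegotiation is also switched off here if SUPPORTED_Autoneg is
 * missing from the mask.
 */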

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values into the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE
			&& ((cmd->speed != SPEED_1000
					&& cmd->speed != SPEED_100
					&& cmd->speed != SPEED_10)
				|| (cmd->duplex != DUPLEX_HALF
					&& cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = cmd->speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	cmd->speed = phydev->speed;
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);
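
/*
 * Example (illustrative sketch): a driver can forward its ethtool
 * get_settings/set_settings callbacks straight to the two helpers above.
 * "struct foo_priv" and "priv->phydev" are assumptions for the sketch;
 * error handling for a missing PHY is omitted.
 *
 *	static int foo_get_settings(struct net_device *dev,
 *			struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 *
 *	static int foo_set_settings(struct net_device *dev,
 *			struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 */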

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @mii_data: MII ioctl data
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		break;
	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		phy_write(phydev, mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR
				&& val & BMCR_RESET
				&& phydev->drv->config_init)
			phydev->drv->config_init(phydev);
		break;

	default:
		return -ENOTTY;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
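
/*
 * Example (illustrative sketch): drivers usually reach this function from
 * their ioctl handler by translating the ifreq with if_mii().  The
 * surrounding driver structure and the attached-PHY check are assumptions
 * for the sketch.
 *
 *	static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!netif_running(dev) || !priv->phydev)
 *			return -EINVAL;
 *
 *		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
 *	}
 */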

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);
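
/*
 * Example (illustrative sketch): to force a link to 100/FULL without
 * autonegotiation, a driver could set the relevant phydev fields and then
 * restart the PHY through this function.  The policy is up to the caller;
 * this only shows the call sequence.
 *
 *	phydev->autoneg = AUTONEG_DISABLE;
 *	phydev->speed = SPEED_100;
 *	phydev->duplex = DUPLEX_FULL;
 *	err = phy_start_aneg(phydev);
 */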


static void phy_change(struct work_struct *work);
static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state changes,
 *   pass in the callback @handler, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	INIT_WORK(&phydev->state_queue, phy_state_machine);
	init_timer(&phydev->phy_timer);
	phydev->phy_timer.function = &phy_timer;
	phydev->phy_timer.data = (unsigned long) phydev;
	mod_timer(&phydev->phy_timer, jiffies + HZ);
}
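
/*
 * Example (illustrative sketch): most MAC drivers do not call this
 * directly; the phy_connect() path in phy_device.c typically starts the
 * machine on their behalf.  A driver managing the PHY by hand and wanting
 * state-change notifications could do something like the following (the
 * handler name is made up for the sketch; passing NULL simply runs the
 * machine without notifications):
 *
 *	static void foo_adjust_state(struct net_device *dev)
 *	{
 *		... inspect the driver's phy_device and react to the
 *		    new state ...
 *	}
 *
 *	phy_start_machine(phydev, &foo_adjust_state);
 */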

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer and, unless the PHY
 *   had not yet reached the UP state, sets the state back to UP.
 *   This function must be called BEFORE phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	del_timer_sync(&phydev->phy_timer);
	cancel_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}
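
/*
 * Example (illustrative sketch): the comment above requires this call to
 * come before phy_detach().  A teardown path could therefore look like:
 *
 *	phy_stop(phydev);
 *	if (phydev->irq != PHY_POLL)
 *		phy_stop_interrupts(phydev);
 *	phy_stop_machine(phydev);
 *	phy_detach(phydev);
 *
 * phy_detach() lives in phy_device.c; the ordering is the point here,
 * not a complete disconnect implementation.
 */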

/**
 * phy_force_reduction - reduce PHY speed/duplex settings by one step
 * @phydev: target phy_device struct
 *
 * Description: Reduces the speed/duplex settings by one notch,
 *   in this order--
 *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 *   The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n", phydev->speed,
			DUPLEX_FULL == phydev->duplex ?
			"FULL" : "HALF");
}


/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
				phydev->bus->name,
				phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);
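
/*
 * Example (illustrative sketch): callers are expected to check for a real
 * IRQ first and fall back to polling otherwise, e.g.
 *
 *	if (phydev->irq > 0)
 *		err = phy_start_interrupts(phydev);
 *
 * With phydev->irq left at PHY_POLL the state machine simply polls the
 * PHY every PHY_STATE_TIME seconds instead.
 */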

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);


/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}


/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
		case PHY_STARTING:
			phydev->state = PHY_PENDING;
			break;
		case PHY_READY:
			phydev->state = PHY_UP;
			break;
		case PHY_HALTED:
			phydev->state = PHY_RESUMING;
		default:
			break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
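
/*
 * Example (illustrative sketch): a typical MAC driver brings the PHY up
 * from its open() routine and shuts it down from its stop() routine,
 * roughly as below.  The foo_* names and priv layout are assumptions for
 * the sketch.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		... attach the PHY into priv->phydev ...
 *		phy_start(priv->phydev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		phy_stop(priv->phydev);
 *		... detach the PHY ...
 *		return 0;
 *	}
 */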

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 *
 * Description: Scheduled by the state_queue workqueue each time
 *   phy_timer is triggered.
 */
static void phy_state_machine(struct work_struct *work)
{
	struct phy_device *phydev =
			container_of(work, struct phy_device, state_queue);
	int needs_aneg = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
		case PHY_DOWN:
		case PHY_STARTING:
		case PHY_READY:
		case PHY_PENDING:
			break;
		case PHY_UP:
			needs_aneg = 1;

			phydev->link_timeout = PHY_AN_TIMEOUT;

			break;
		case PHY_AN:
			err = phy_read_status(phydev);

			if (err < 0)
				break;

			/* If the link is down, give up on
			 * negotiation for now */
			if (!phydev->link) {
				phydev->state = PHY_NOLINK;
				netif_carrier_off(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);
				break;
			}

			/* Check if negotiation is done.  Break
			 * if there's an error */
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* If AN is done, we're running */
			if (err > 0) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);

			} else if (0 == phydev->link_timeout--) {
				int idx;

				needs_aneg = 1;
				/* If we have the magic_aneg bit,
				 * we try again */
				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
					break;

				/* The timer expired, and we still
				 * don't have a setting, so we try
				 * forcing it until we find one that
				 * works, starting from the fastest speed,
				 * and working our way down */
				idx = phy_find_valid(0, phydev->supported);

				phydev->speed = settings[idx].speed;
				phydev->duplex = settings[idx].duplex;

				phydev->autoneg = AUTONEG_DISABLE;

				pr_info("Trying %d/%s\n", phydev->speed,
						DUPLEX_FULL ==
						phydev->duplex ?
						"FULL" : "HALF");
			}
			break;
		case PHY_NOLINK:
			err = phy_read_status(phydev);

			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);
			}
			break;
		case PHY_FORCING:
			err = genphy_update_link(phydev);

			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				if (0 == phydev->link_timeout--) {
					phy_force_reduction(phydev);
					needs_aneg = 1;
				}
			}

			phydev->adjust_link(phydev->attached_dev);
			break;
		case PHY_RUNNING:
			/* Only register a CHANGE if we are
			 * polling */
			if (PHY_POLL == phydev->irq)
				phydev->state = PHY_CHANGELINK;
			break;
		case PHY_CHANGELINK:
			err = phy_read_status(phydev);

			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
				netif_carrier_off(phydev->attached_dev);
			}

			phydev->adjust_link(phydev->attached_dev);

			if (PHY_POLL != phydev->irq)
				err = phy_config_interrupt(phydev,
						PHY_INTERRUPT_ENABLED);
			break;
		case PHY_HALTED:
			if (phydev->link) {
				phydev->link = 0;
				netif_carrier_off(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);
			}
			break;
		case PHY_RESUMING:

			err = phy_clear_interrupt(phydev);

			if (err)
				break;

			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);

			if (err)
				break;

			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				/* err > 0 if AN is done.
				 * Otherwise, it's 0, and we're
				 * still waiting for AN */
				if (err > 0) {
					phydev->state = PHY_RUNNING;
				} else {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
				}
			} else
				phydev->state = PHY_RUNNING;
			break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
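
/*
 * Summary of the transitions handled above (descriptive only):
 *
 *	UP         -> restart autonegotiation, move to AN or FORCING
 *	AN         -> RUNNING once aneg completes, NOLINK if the link drops,
 *	              or fall back to forcing a speed when the timer expires
 *	FORCING    -> RUNNING on link, otherwise step the speed down and retry
 *	NOLINK     -> RUNNING when the link returns
 *	RUNNING    -> CHANGELINK when polling detects a change
 *	CHANGELINK -> RUNNING or NOLINK depending on the new link state
 *	HALTED     -> report carrier off once
 *	RESUMING   -> RUNNING, or back to AN while autonegotiation finishes
 *
 * DOWN, STARTING, READY and PENDING are left untouched here.
 */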

/* PHY timer which schedules the state machine work */
static void phy_timer(unsigned long data)
{
	struct phy_device *phydev = (struct phy_device *)data;

	/*
	 * PHY I/O operations can potentially sleep so we ensure that
	 * it's done from a process context
	 */
	schedule_work(&phydev->state_queue);
}
1003