/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
			phydev->link ? "Up" : "Down");
	if (phydev->link)
		printk(" - %d/%s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ?
				"Full" : "Half");

	printk("\n");
}
EXPORT_SYMBOL(phy_print_status);
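
/*
 * Example (illustrative sketch, not part of this file): a MAC driver's
 * adjust_link callback will typically call phy_print_status() whenever
 * the link state changes.  The private structure and its oldlink field
 * below are hypothetical.
 *
 *	static void my_mac_adjust_link(struct net_device *dev)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *		struct phy_device *phydev = priv->phydev;
 *
 *		if (phydev->link != priv->oldlink) {
 *			priv->oldlink = phydev->link;
 *			phy_print_status(phydev);
 *		}
 *	}
 */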


/**
 * phy_read - Convenience function for reading a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to read
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_read(struct phy_device *phydev, u16 regnum)
{
	int retval;
	struct mii_bus *bus = phydev->bus;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	retval = bus->read(bus, phydev->addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return retval;
}
EXPORT_SYMBOL(phy_read);

/**
 * phy_write - Convenience function for writing a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to write
 * @val: value to write to @regnum
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	int err;
	struct mii_bus *bus = phydev->bus;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	err = bus->write(bus, phydev->addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return err;
}
EXPORT_SYMBOL(phy_write);
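
/*
 * Example (illustrative sketch, not part of this file): a typical
 * driver-side read-modify-write built on the two helpers above, here
 * powering the PHY down via the BMCR.  Process context only.
 *
 *	static int my_phy_power_down(struct phy_device *phydev)
 *	{
 *		int bmcr = phy_read(phydev, MII_BMCR);
 *
 *		if (bmcr < 0)
 *			return bmcr;
 *		return phy_write(phydev, MII_BMCR, bmcr | BMCR_PDOWN);
 *	}
 */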

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns BMSR_ANEGCOMPLETE
 *   if auto-negotiation is done, 0 if it is still in progress, or a
 *   negative errno if reading the register failed.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
			(settings[idx].speed != speed ||
			settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first setting at or after
 *   @idx that is enabled in the @features mask.  As the settings
 *   table is ordered from fastest to slowest, this is the best
 *   setting that is no faster than the one at @idx.  Returns the
 *   index of the last setting if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  If the current setting is not supported, drop down
 *   through this order until one is:  1000/FULL, 1000/HALF,
 *   100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values into the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE
			&& ((cmd->speed != SPEED_1000
					&& cmd->speed != SPEED_100
					&& cmd->speed != SPEED_10)
				|| (cmd->duplex != DUPLEX_HALF
					&& cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = cmd->speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	cmd->speed = phydev->speed;
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);
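
/*
 * Example (illustrative sketch, not part of this file): a MAC driver
 * can forward its ethtool get/set_settings operations to the two
 * helpers above.  The private structure layout is hypothetical.
 *
 *	static int my_mac_get_settings(struct net_device *dev,
 *			struct ethtool_cmd *cmd)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 *
 *	static int my_mac_set_settings(struct net_device *dev,
 *			struct ethtool_cmd *cmd)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 *
 * These would then be wired up as the .get_settings and .set_settings
 * members of the driver's struct ethtool_ops.
 */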

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @mii_data: MII ioctl data
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at your own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		break;
	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		phy_write(phydev, mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR
				&& val & BMCR_RESET
				&& phydev->drv->config_init) {
			phy_scan_fixups(phydev);
			phydev->drv->config_init(phydev);
		}
		break;

	default:
		return -ENOTTY;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
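
/*
 * Example (illustrative sketch, not part of this file): a driver ioctl
 * handler can hand the SIOCxMII requests straight to phy_mii_ioctl().
 * Everything except if_mii() and phy_mii_ioctl() below is hypothetical.
 *
 *	static int my_mac_ioctl(struct net_device *dev, struct ifreq *rq,
 *			int cmd)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		if (!netif_running(dev) || !priv->phydev)
 *			return -EINVAL;
 *		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
 *	}
 */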

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);
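
/*
 * Example (illustrative sketch, not part of this file): forcing a
 * fixed 100/Full link from driver code, mirroring what
 * phy_ethtool_sset() does above.  With autoneg disabled,
 * phy_start_aneg() sanitizes the values, programs the PHY and moves
 * the state machine to PHY_FORCING rather than PHY_AN.
 *
 *	static int my_force_100_full(struct phy_device *phydev)
 *	{
 *		phydev->autoneg = AUTONEG_DISABLE;
 *		phydev->speed = SPEED_100;
 *		phydev->duplex = DUPLEX_FULL;
 *		phydev->advertising &= ~ADVERTISED_Autoneg;
 *
 *		return phy_start_aneg(phydev);
 *	}
 */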


static void phy_change(struct work_struct *work);
static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state changes,
 *   pass in the callback @handler, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	INIT_WORK(&phydev->state_queue, phy_state_machine);
	init_timer(&phydev->phy_timer);
	phydev->phy_timer.function = &phy_timer;
	phydev->phy_timer.data = (unsigned long) phydev;
	mod_timer(&phydev->phy_timer, jiffies + HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer and any queued work, and
 *   sets the state back to UP (unless the PHY had not been brought up
 *   yet).  This function must be called BEFORE phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	del_timer_sync(&phydev->phy_timer);
	cancel_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}

/**
 * phy_force_reduction - reduce PHY speed/duplex settings by one step
 * @phydev: target phy_device struct
 *
 * Description: Reduces the speed/duplex settings by one notch,
 *   in this order--
 *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 *   The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n", phydev->speed,
			DUPLEX_FULL == phydev->duplex ?
			"FULL" : "HALF");
}


/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
				phydev->bus->name,
				phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);
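
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * attached with phy_attach() and knows the PHY's interrupt line can
 * arm the interrupt path itself; with PHY_POLL it simply relies on the
 * periodic state machine instead.  my_board_phy_irq is hypothetical.
 *
 *	phydev->irq = my_board_phy_irq;
 *	phy_start_machine(phydev, NULL);
 *	if (phydev->irq != PHY_POLL)
 *		phy_start_interrupts(phydev);
 *	phy_start(phydev);
 */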

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);


/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}


/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
		case PHY_STARTING:
			phydev->state = PHY_PENDING;
			break;
		case PHY_READY:
			phydev->state = PHY_UP;
			break;
		case PHY_HALTED:
			phydev->state = PHY_RESUMING;
		default:
			break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
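
/*
 * Example (illustrative sketch, not part of this file): the usual
 * pairing in a MAC driver, with phy_start() in the device open path
 * and phy_stop() in the stop path.  priv->phydev is hypothetical.
 *
 *	static int my_mac_open(struct net_device *dev)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		phy_start(priv->phydev);
 *		netif_start_queue(dev);
 *		return 0;
 *	}
 *
 *	static int my_mac_stop(struct net_device *dev)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		netif_stop_queue(dev);
 *		phy_stop(priv->phydev);
 *		return 0;
 *	}
 */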

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 *
 * Description: Scheduled by the state_queue workqueue each time
 *   phy_timer is triggered.
 */
static void phy_state_machine(struct work_struct *work)
{
	struct phy_device *phydev =
			container_of(work, struct phy_device, state_queue);
	int needs_aneg = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
		case PHY_DOWN:
		case PHY_STARTING:
		case PHY_READY:
		case PHY_PENDING:
			break;
		case PHY_UP:
			needs_aneg = 1;

			phydev->link_timeout = PHY_AN_TIMEOUT;

			break;
		case PHY_AN:
			err = phy_read_status(phydev);

			if (err < 0)
				break;

			/* If the link is down, give up on
			 * negotiation for now */
			if (!phydev->link) {
				phydev->state = PHY_NOLINK;
				netif_carrier_off(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);
				break;
			}

			/* Check if negotiation is done.  Break
			 * if there's an error */
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* If AN is done, we're running */
			if (err > 0) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);

			} else if (0 == phydev->link_timeout--) {
				int idx;

				needs_aneg = 1;
				/* If we have the magic_aneg bit,
				 * we try again */
				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
					break;

				/* The timer expired, and we still
				 * don't have a setting, so we try
				 * forcing it until we find one that
				 * works, starting from the fastest speed,
				 * and working our way down */
				idx = phy_find_valid(0, phydev->supported);

				phydev->speed = settings[idx].speed;
				phydev->duplex = settings[idx].duplex;

				phydev->autoneg = AUTONEG_DISABLE;

				pr_info("Trying %d/%s\n", phydev->speed,
						DUPLEX_FULL ==
						phydev->duplex ?
						"FULL" : "HALF");
			}
			break;
		case PHY_NOLINK:
			err = phy_read_status(phydev);

			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);
			}
			break;
		case PHY_FORCING:
			err = genphy_update_link(phydev);

			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				if (0 == phydev->link_timeout--) {
					phy_force_reduction(phydev);
					needs_aneg = 1;
				}
			}

			phydev->adjust_link(phydev->attached_dev);
			break;
		case PHY_RUNNING:
			/* Only register a CHANGE if we are
			 * polling */
			if (PHY_POLL == phydev->irq)
				phydev->state = PHY_CHANGELINK;
			break;
		case PHY_CHANGELINK:
			err = phy_read_status(phydev);

			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
				netif_carrier_off(phydev->attached_dev);
			}

			phydev->adjust_link(phydev->attached_dev);

			if (PHY_POLL != phydev->irq)
				err = phy_config_interrupt(phydev,
						PHY_INTERRUPT_ENABLED);
			break;
		case PHY_HALTED:
			if (phydev->link) {
				phydev->link = 0;
				netif_carrier_off(phydev->attached_dev);
				phydev->adjust_link(phydev->attached_dev);
			}
			break;
		case PHY_RESUMING:

			err = phy_clear_interrupt(phydev);

			if (err)
				break;

			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);

			if (err)
				break;

			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				/* err > 0 if AN is done.
				 * Otherwise, it's 0, and we're
				 * still waiting for AN */
				if (err > 0) {
					phydev->state = PHY_RUNNING;
				} else {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
				}
			} else
				phydev->state = PHY_RUNNING;
			break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}

/* PHY timer which schedules the state machine work */
static void phy_timer(unsigned long data)
{
	struct phy_device *phydev = (struct phy_device *)data;

	/*
	 * PHY I/O operations can potentially sleep so we ensure that
	 * it's done from a process context
	 */
	schedule_work(&phydev->state_queue);
}