xref: /openbmc/linux/drivers/net/ethernet/ibm/emac/core.c (revision dc6a81c3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * drivers/net/ethernet/ibm/emac/core.c
4  *
5  * Driver for PowerPC 4xx on-chip ethernet controller.
6  *
7  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
8  *                <benh@kernel.crashing.org>
9  *
10  * Based on the arch/ppc version of the driver:
11  *
12  * Copyright (c) 2004, 2005 Zultys Technologies.
13  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14  *
15  * Based on original work by
16  * 	Matt Porter <mporter@kernel.crashing.org>
17  *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
18  *      Armin Kuster <akuster@mvista.com>
19  * 	Johnnie Peters <jpeters@mvista.com>
20  */
21 
22 #include <linux/module.h>
23 #include <linux/sched.h>
24 #include <linux/string.h>
25 #include <linux/errno.h>
26 #include <linux/delay.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/crc32.h>
32 #include <linux/ethtool.h>
33 #include <linux/mii.h>
34 #include <linux/bitops.h>
35 #include <linux/workqueue.h>
36 #include <linux/of.h>
37 #include <linux/of_address.h>
38 #include <linux/of_irq.h>
39 #include <linux/of_net.h>
40 #include <linux/of_mdio.h>
41 #include <linux/slab.h>
42 
43 #include <asm/processor.h>
44 #include <asm/io.h>
45 #include <asm/dma.h>
46 #include <linux/uaccess.h>
47 #include <asm/dcr.h>
48 #include <asm/dcr-regs.h>
49 
50 #include "core.h"
51 
52 /*
53  * Lack of dma_unmap_???? calls is intentional.
54  *
55  * API-correct usage requires additional support state information to be
56  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
57  * EMAC design (e.g. TX buffer passed from network stack can be split into
58  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
59  * maintaining such information will add additional overhead.
60  * Current DMA API implementation for 4xx processors only ensures cache coherency
61  * and dma_unmap_???? routines are empty and are likely to stay this way.
62  * I decided to omit dma_unmap_??? calls because I don't want to add additional
63  * complexity just for the sake of following some abstract API, when it doesn't
64  * add any real benefit to the driver. I understand that this decision maybe
65  * controversial, but I really tried to make code API-correct and efficient
66  * at the same time and didn't come up with code I liked :(.                --ebs
67  */
68 
69 #define DRV_NAME        "emac"
70 #define DRV_VERSION     "3.54"
71 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
72 
73 MODULE_DESCRIPTION(DRV_DESC);
74 MODULE_AUTHOR
75     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
76 MODULE_LICENSE("GPL");
77 
78 /* minimum number of free TX descriptors required to wake up TX process */
79 #define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
80 
81 /* If packet size is less than this number, we allocate small skb and copy packet
82  * contents into it instead of just sending original big skb up
83  */
84 #define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
85 
86 /* Since multiple EMACs share MDIO lines in various ways, we need
87  * to avoid re-using the same PHY ID in cases where the arch didn't
88  * setup precise phy_map entries
89  *
90  * XXX This is something that needs to be reworked as we can have multiple
91  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
92  * probably require in that case to have explicit PHY IDs in the device-tree
93  */
94 static u32 busy_phy_map;
95 static DEFINE_MUTEX(emac_phy_map_lock);
96 
97 /* This is the wait queue used to wait on any event related to probe, that
98  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
99  */
100 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
101 
102 /* Having stable interface names is a doomed idea. However, it would be nice
103  * if we didn't have completely random interface names at boot too :-) It's
104  * just a matter of making everybody's life easier. Since we are doing
105  * threaded probing, it's a bit harder though. The base idea here is that
106  * we make up a list of all emacs in the device-tree before we register the
107  * driver. Every emac will then wait for the previous one in the list to
108  * initialize before itself. We should also keep that list ordered by
109  * cell_index.
110  * That list is only 4 entries long, meaning that additional EMACs don't
111  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
112  */
113 
114 #define EMAC_BOOT_LIST_SIZE	4
115 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
116 
117 /* How long should I wait for dependent devices ? */
118 #define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
119 
120 /* I don't want to litter system log with timeout errors
121  * when we have brain-damaged PHY.
122  */
123 static inline void emac_report_timeout_error(struct emac_instance *dev,
124 					     const char *error)
125 {
126 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
127 				  EMAC_FTR_460EX_PHY_CLK_FIX |
128 				  EMAC_FTR_440EP_PHY_CLK_FIX))
129 		DBG(dev, "%s" NL, error);
130 	else if (net_ratelimit())
131 		printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error);
132 }
133 
134 /* EMAC PHY clock workaround:
135  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
136  * which allows controlling each EMAC clock
137  */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	/* Set the SDR0_MFR_ECS bit for this EMAC cell (clear-mask 0, set-mask
	 * ECS).  NOTE(review): presumably this selects the internal clock
	 * source as part of the 440EP PHY clock workaround -- confirm against
	 * the 440EP SDR0_MFR register documentation.
	 */
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
146 
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	/* Clear the SDR0_MFR_ECS bit for this EMAC cell, undoing
	 * emac_rx_clk_tx() and restoring the default clock selection. */
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
155 
156 /* PHY polling intervals */
157 #define PHY_POLL_LINK_ON	HZ
158 #define PHY_POLL_LINK_OFF	(HZ / 5)
159 
160 /* Graceful stop timeouts in us.
161  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
162  */
163 #define STOP_TIMEOUT_10		1230
164 #define STOP_TIMEOUT_100	124
165 #define STOP_TIMEOUT_1000	13
166 #define STOP_TIMEOUT_1000_JUMBO	73
167 
/* 01:80:C2:00:00:01 -- NOTE(review): this is the IEEE 802.3x PAUSE frame
 * destination group address; presumably registered so flow-control frames
 * are received -- confirm against the users of this table.
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
171 
/* ethtool statistics names.
 * Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats:
 * entries here are matched to those struct fields by position.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
190 
191 static irqreturn_t emac_irq(int irq, void *dev_instance);
192 static void emac_clean_tx_ring(struct emac_instance *dev);
193 static void __emac_set_multicast_list(struct emac_instance *dev);
194 
195 static inline int emac_phy_supports_gige(int phy_mode)
196 {
197 	return  phy_interface_mode_is_rgmii(phy_mode) ||
198 		phy_mode == PHY_INTERFACE_MODE_GMII ||
199 		phy_mode == PHY_INTERFACE_MODE_SGMII ||
200 		phy_mode == PHY_INTERFACE_MODE_TBI ||
201 		phy_mode == PHY_INTERFACE_MODE_RTBI;
202 }
203 
204 static inline int emac_phy_gpcs(int phy_mode)
205 {
206 	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
207 		phy_mode == PHY_INTERFACE_MODE_TBI ||
208 		phy_mode == PHY_INTERFACE_MODE_RTBI;
209 }
210 
/* Enable the transmitter by setting EMAC_MR0_TXE; the write is skipped
 * when the bit is already set.
 */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	/* Read-modify-write of MR0 */
	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
222 
/* Disable the transmitter: clear EMAC_MR0_TXE, then busy-wait up to
 * dev->stop_timeout microseconds for MR0_TXI to assert (which appears to
 * signal the TX channel has drained -- see emac_rx_enable's analogous use
 * of RXI).  On timeout only an error is reported; no further recovery here.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
242 
243 static void emac_rx_enable(struct emac_instance *dev)
244 {
245 	struct emac_regs __iomem *p = dev->emacp;
246 	u32 r;
247 
248 	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
249 		goto out;
250 
251 	DBG(dev, "rx_enable" NL);
252 
253 	r = in_be32(&p->mr0);
254 	if (!(r & EMAC_MR0_RXE)) {
255 		if (unlikely(!(r & EMAC_MR0_RXI))) {
256 			/* Wait if previous async disable is still in progress */
257 			int n = dev->stop_timeout;
258 			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
259 				udelay(1);
260 				--n;
261 			}
262 			if (unlikely(!n))
263 				emac_report_timeout_error(dev,
264 							  "RX disable timeout");
265 		}
266 		out_be32(&p->mr0, r | EMAC_MR0_RXE);
267 	}
268  out:
269 	;
270 }
271 
/* Disable the RX channel synchronously: clear EMAC_MR0_RXE and busy-wait
 * up to dev->stop_timeout microseconds for MR0_RXI to assert.  On timeout
 * only an error is reported.  See emac_rx_disable_async() for the
 * fire-and-forget variant.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
291 
/* Quiesce the interface for a reconfiguration: raise no_mcast under the TX
 * and addr locks so concurrent multicast updates get deferred (recorded in
 * mcast_pending by emac_set_multicast_list), refresh trans_start to avoid a
 * spurious TX watchdog, then stop MAL polling and the TX queue.
 * Undone by emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
303 
/* Resume the interface after emac_netif_stop(): clear no_mcast, replay any
 * multicast list update that arrived while stopped, wake the TX queue and
 * re-enable MAL polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
323 
/* Clear EMAC_MR0_RXE without waiting for the RX channel to drain.  The
 * next emac_rx_enable() waits for MR0_RXI before re-enabling, which is
 * where the deferred drain completes.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
335 
/* Soft-reset the EMAC core via EMAC_MR0_SRST after quiescing RX and TX.
 * Returns 0 on success or -ETIMEDOUT if SRST never self-clears; the
 * outcome is also latched in dev->reset_failed so a subsequent attempt
 * skips the (pointless) channel-disable step.  On 460EX/GT a failed first
 * attempt is retried with the internal clock selected -- see the long
 * workaround comment below.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
	bool __maybe_unused try_internal_clock = false;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 *
	 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
	 * ethernet cable is not attached. This causes the reset to timeout
	 * and the PHY detection code in emac_init_phy() is unable to
	 * communicate and detect the AR8035-A PHY. As a result, the emac
	 * driver bails out early and the user has no ethernet.
	 * In order to stay compatible with existing configurations, the
	 * driver will temporarily switch to the internal clock, after
	 * the first reset fails.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	/* Kick the soft reset and wait (bounded by n) for SRST to clear */
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (!n && !try_internal_clock) {
			/* first attempt has timed out. */
			n = 20;
			try_internal_clock = true;
			goto do_retry;
		}

		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
416 
/* Program the group-address hash table (GAHT) from the netdev's multicast
 * list: CRC each address to a slot, accumulate the per-register bit masks
 * in gaht_temp, then write all GAHT registers in one pass (so addresses no
 * longer in the list are cleared as a side effect).
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* CRC-32 of the address selects the hash slot */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
444 
445 static inline u32 emac_iff2rmr(struct net_device *ndev)
446 {
447 	struct emac_instance *dev = netdev_priv(ndev);
448 	u32 r;
449 
450 	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
451 
452 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
453 	    r |= EMAC4_RMR_BASE;
454 	else
455 	    r |= EMAC_RMR_BASE;
456 
457 	if (ndev->flags & IFF_PROMISC)
458 		r |= EMAC_RMR_PME;
459 	else if (ndev->flags & IFF_ALLMULTI ||
460 			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
461 		r |= EMAC_RMR_PMME;
462 	else if (!netdev_mc_empty(ndev))
463 		r |= EMAC_RMR_MAE;
464 
465 	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
466 		r &= ~EMAC4_RMR_MJS_MASK;
467 		r |= EMAC4_RMR_MJS(ndev->mtu);
468 	}
469 
470 	return r;
471 }
472 
/* Base MR1 value for "classic" (non-EMAC4) cores: common mode bits plus the
 * encoded TX/RX FIFO sizes.  Unrecognized sizes are warned about and leave
 * the corresponding field at its reset encoding.
 */
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
502 
/* Base MR1 value for EMAC4 cores: common mode bits, the OPB bus clock
 * indication (in MHz) and the encoded TX/RX FIFO sizes.  Unrecognized
 * sizes are warned about and leave the corresponding field unset.
 */
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_TFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_RFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
548 
549 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
550 {
551 	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
552 		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
553 		__emac_calc_base_mr1(dev, tx_size, rx_size);
554 }
555 
556 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
557 {
558 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
559 		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
560 	else
561 		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
562 }
563 
564 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
565 				 unsigned int low, unsigned int high)
566 {
567 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
568 		return (low << 22) | ( (high & 0x3ff) << 6);
569 	else
570 		return (low << 23) | ( (high & 0x1ff) << 7);
571 }
572 
/* (Re)program the EMAC for the current PHY state: reset the core (or force
 * internal loopback while there is no link), then program MR1 (mode and
 * FIFO sizes), the MAC address, VLAN TPID, RX mode, FIFO thresholds, PAUSE
 * watermarks/timer and the interrupt enables.  Returns 0 on success or
 * -ETIMEDOUT if the core reset fails.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* No carrier: put the MAC in full-duplex internal loopback
		 * instead of resetting it */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* Jumbo frames: select the core-specific JPSM bit */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
735 
/* Stop the interface, reprogram the EMAC and restart it.  RX/TX are only
 * re-enabled when emac_configure() succeeds (returns 0); the netif is
 * restarted either way.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
747 
/* Full TX path recovery: disable TX and its MAL channel, drop everything
 * still queued in the TX ring, reset the ring indices, reconfigure the
 * whole EMAC and bring TX/RX back up.  Note the emac_configure() return
 * value is deliberately ignored here; the channels are re-enabled
 * regardless.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
763 
/* Deferred TX-timeout recovery, scheduled from emac_tx_timeout().  Runs
 * under link_lock and only acts while the device is open, so it cannot
 * race an ndo_open/ndo_stop transition.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
778 
/* ndo_tx_timeout hook: recovery must not run in this (atomic) context, so
 * defer the actual reset to emac_reset_work().
 */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
787 
788 
789 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
790 {
791 	int done = !!(stacr & EMAC_STACR_OC);
792 
793 	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
794 		done = !done;
795 
796 	return done;
797 };
798 
/* Perform one MDIO read through the STA control register (STACR).
 * Serialized by dev->mdio_lock and routed through the ZMII/RGMII bridge
 * when present.  Returns the 16-bit register value on success,
 * -ETIMEDOUT if the management interface never goes idle or the read
 * never completes, or -EREMOTEIO on a PHY error (STACR_PHYE).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	/* Encode target register and PHY address */
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from the completed STACR value */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
869 
/* Perform one MDIO write through the STA control register (STACR).
 * Serialized by dev->mdio_lock and routed through the ZMII/RGMII bridge
 * when present.  The function is void: a timeout is only visible as a
 * debug trace (err is tracked but never returned to the caller).
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	/* Encode target register, PHY address and the data to write */
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
930 
931 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
932 {
933 	struct emac_instance *dev = netdev_priv(ndev);
934 	int res;
935 
936 	res = __emac_mdio_read((dev->mdio_instance &&
937 				dev->phy.gpcs_address != id) ?
938 				dev->mdio_instance : dev,
939 			       (u8) id, (u8) reg);
940 	return res;
941 }
942 
943 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
944 {
945 	struct emac_instance *dev = netdev_priv(ndev);
946 
947 	__emac_mdio_write((dev->mdio_instance &&
948 			   dev->phy.gpcs_address != id) ?
949 			   dev->mdio_instance : dev,
950 			  (u8) id, (u8) reg, (u16) val);
951 }
952 
953 /* Tx lock BH */
/* Apply the current RX mode: bounce the RX channel, reprogram the hash
 * table when hashing is selected, and write the new RMR value.  See the
 * long comment below for why this deliberately avoids a full EMAC reset.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
985 
986 /* Tx lock BH */
/* ndo_set_rx_mode hook.  While the interface is being reconfigured
 * (no_mcast set by emac_netif_stop) just record that an update is pending;
 * emac_netif_start() replays it.  Otherwise apply the new RX mode under
 * link_lock.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}
1004 
/*
 * Change the interface MAC address. The new address is validated, copied
 * into the netdev, and written to the EMAC individual address registers
 * (IAHR holds the top 2 bytes, IALR the low 4) with RX and TX briefly
 * disabled around the register update.
 * NOTE(review): presumably wired as .ndo_set_mac_address — confirm
 * against the netdev_ops table (not visible in this chunk).
 */
static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
	       return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	/* IAHR = address bytes 0-1, IALR = address bytes 2-5 */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}
1031 
/*
 * Rebuild the RX ring for a new MTU while the device is running: stop
 * RX, drop any in-flight packets, reallocate skbs only when the new
 * buffer size is larger, change the MR1 "Jumbo" bit via a full TX reset
 * when crossing the jumbo threshold, then restart RX.
 * Returns 0 on success or -ENOMEM if skb allocation fails (RX is still
 * restarted with the buffers that are in place).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Any partially assembled scatter/gather packet is dropped */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* Map including the NET_IP_ALIGN headroom, then re-add the
		 * offset so the BD points at the IP-aligned payload.
		 */
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
				   rx_sync_size, DMA_FROM_DEVICE)
				   + NET_IP_ALIGN;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1116 
1117 /* Process ctx, rtnl_lock semaphore */
1118 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1119 {
1120 	struct emac_instance *dev = netdev_priv(ndev);
1121 	int ret = 0;
1122 
1123 	DBG(dev, "change_mtu(%d)" NL, new_mtu);
1124 
1125 	if (netif_running(ndev)) {
1126 		/* Check if we really need to reinitialize RX ring */
1127 		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1128 			ret = emac_resize_rx_ring(dev, new_mtu);
1129 	}
1130 
1131 	if (!ret) {
1132 		ndev->mtu = new_mtu;
1133 		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1134 		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1135 	}
1136 
1137 	return ret;
1138 }
1139 
1140 static void emac_clean_tx_ring(struct emac_instance *dev)
1141 {
1142 	int i;
1143 
1144 	for (i = 0; i < NUM_TX_BUFF; ++i) {
1145 		if (dev->tx_skb[i]) {
1146 			dev_kfree_skb(dev->tx_skb[i]);
1147 			dev->tx_skb[i] = NULL;
1148 			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1149 				++dev->estats.tx_dropped;
1150 		}
1151 		dev->tx_desc[i].ctrl = 0;
1152 		dev->tx_desc[i].data_ptr = 0;
1153 	}
1154 }
1155 
1156 static void emac_clean_rx_ring(struct emac_instance *dev)
1157 {
1158 	int i;
1159 
1160 	for (i = 0; i < NUM_RX_BUFF; ++i)
1161 		if (dev->rx_skb[i]) {
1162 			dev->rx_desc[i].ctrl = 0;
1163 			dev_kfree_skb(dev->rx_skb[i]);
1164 			dev->rx_skb[i] = NULL;
1165 			dev->rx_desc[i].data_ptr = 0;
1166 		}
1167 
1168 	if (dev->rx_sg_skb) {
1169 		dev_kfree_skb(dev->rx_sg_skb);
1170 		dev->rx_sg_skb = NULL;
1171 	}
1172 }
1173 
/*
 * Attach @skb to RX ring slot @slot and hand the descriptor back to the
 * hardware. Returns -ENOMEM if @skb is NULL (allocation failed at the
 * caller).
 */
static int
__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
{
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* Map including the NET_IP_ALIGN headroom, then point the BD at
	 * the IP-aligned payload start.
	 */
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			   dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
	/* BD fields must be visible before EMPTY hands ownership to HW */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1192 
1193 static int
1194 emac_alloc_rx_skb(struct emac_instance *dev, int slot)
1195 {
1196 	struct sk_buff *skb;
1197 
1198 	skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
1199 					  GFP_KERNEL);
1200 
1201 	return __emac_prepare_rx_skb(skb, dev, slot);
1202 }
1203 
1204 static int
1205 emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
1206 {
1207 	struct sk_buff *skb;
1208 
1209 	skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
1210 
1211 	return __emac_prepare_rx_skb(skb, dev, slot);
1212 }
1213 
1214 static void emac_print_link_status(struct emac_instance *dev)
1215 {
1216 	if (netif_carrier_ok(dev->ndev))
1217 		printk(KERN_INFO "%s: link is up, %d %s%s\n",
1218 		       dev->ndev->name, dev->phy.speed,
1219 		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1220 		       dev->phy.pause ? ", pause enabled" :
1221 		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1222 	else
1223 		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1224 }
1225 
/* Process ctx, rtnl_lock semaphore */
/*
 * Bring the interface up: request the error IRQ, populate the RX ring,
 * start PHY link polling (or assume carrier up when there is no PHY),
 * then configure and enable the MAC, the MAL channels and the queue.
 * Returns 0 on success, the request_irq() error, or -ENOMEM when the
 * RX ring cannot be fully allocated.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping before enabling anything */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Publish link_polling before the work item can run */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1299 
/* BHs disabled */
#if 0
/* Dead code (compiled out): decodes the MR1 register back into
 * speed/duplex/pause settings and compares them with the cached PHY
 * state. Kept for reference only.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1332 
/*
 * Periodic link poll (delayed work). Re-reads the PHY link state: on a
 * link-up transition the MAC is reconfigured via a full TX reset; on a
 * link-down transition the chip is reinitialized. The work re-arms
 * itself with an interval that depends on the link state, and bails
 * out without re-arming once the device is closed (dev->opened is
 * cleared under link_lock in emac_close()).
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1373 
/*
 * Force the link state to be re-evaluated soon: drop carrier and, if
 * link polling is active, cancel the pending poll and re-schedule it
 * with the PHY_POLL_LINK_OFF interval. The second dev->link_polling
 * check guards against emac_close() having cleared the flag while we
 * were cancelling.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1384 
/* Process ctx, rtnl_lock semaphore */
/*
 * Stop the interface: halt link polling, mark the device closed under
 * the link mutex (so emac_link_timer() will not re-arm), disable the
 * MAC and MAL channels, drain both rings and release the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1416 
1417 static inline u16 emac_tx_csum(struct emac_instance *dev,
1418 			       struct sk_buff *skb)
1419 {
1420 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1421 		(skb->ip_summed == CHECKSUM_PARTIAL)) {
1422 		++dev->stats.tx_packets_csum;
1423 		return EMAC_TX_CTRL_TAH_CSUM;
1424 	}
1425 	return 0;
1426 }
1427 
/*
 * Common tail for both xmit paths: kick the transmitter via TMR0, stop
 * the queue when the TX ring is now full, refresh trans_start and the
 * software TX counters. Always returns NETDEV_TX_OK.
 */
static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	netif_trans_update(ndev);
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1453 
/* Tx lock BH */
/*
 * Single-BD transmit path: map the whole linear skb into one TX
 * descriptor and hand it to the hardware. The wmb() ensures all BD
 * fields are in memory before the READY bit transfers ownership.
 */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* BD fields must be visible before READY is set */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1482 
/*
 * Fill consecutive TX BDs with up-to-MAL_MAX_TX_SIZE chunks of the DMA
 * region [@pd, @pd + @len). @slot is the slot *before* the first one to
 * fill; the last slot used is returned. MAL_TX_CTRL_LAST is set on the
 * final chunk only when @last is true. The caller has already estimated
 * that enough free BDs exist (see emac_start_xmit_sg()).
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached here; the caller attaches the skb to the
		 * frame's last BD after all chunks are queued.
		 */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1511 
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/*
 * Scatter/gather transmit: splits the linear part and each page
 * fragment into MAL_MAX_TX_SIZE-sized BDs. The free-BD check is only an
 * estimate, so running out mid-frame is rolled back (undo_frame) and
 * NETDEV_TX_BUSY returned with the queue stopped. The skb is attached
 * to the frame's last BD, and the first BD's control word (with READY)
 * is written last, after wmb(), so the hardware never sees a
 * half-built frame.
 */
static netdev_tx_t
emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1596 
/* Tx lock BHs */
/*
 * Decode the error bits of a completed TX BD's status word into the
 * per-device error counters.
 */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
1624 
/*
 * Reclaim completed TX descriptors (MAL poll_tx callback). Walks the
 * ring from ack_slot, freeing the skb of each BD the hardware has
 * released (READY cleared), recording BD errors, and waking the queue
 * once enough room is available. Runs under the netif TX lock with BHs
 * disabled.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			/* Only the last BD of a frame has an skb attached
			 * (see emac_start_xmit_sg()); intermediate BDs are
			 * NULL here.
			 */
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1670 
/*
 * Hand an RX skb/descriptor back to the hardware without reallocating.
 * The dma_map_single() re-map is used purely for cache coherency over
 * the @len bytes the hardware wrote (see the comment at the top of this
 * file about the intentional lack of dma_unmap calls).
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			       SKB_DATA_ALIGN(len + NET_IP_ALIGN),
			       DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* BD fields must be visible before EMPTY hands ownership to HW */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1688 
/*
 * Decode the error bits of an RX BD's status word into the per-device
 * error counters.
 */
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
1715 
/*
 * Mark the RX checksum as hardware-verified. @ctrl is the BD status
 * already masked with EMAC_BAD_RX_MASK by the caller (emac_poll_rx());
 * zero means no error bits were set — in particular not
 * EMAC_RX_TAH_BAD_CSUM — so the TAH validated the checksum.
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1726 
1727 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1728 {
1729 	if (likely(dev->rx_sg_skb != NULL)) {
1730 		int len = dev->rx_desc[slot].data_len;
1731 		int tot_len = dev->rx_sg_skb->len + len;
1732 
1733 		if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
1734 			++dev->estats.rx_dropped_mtu;
1735 			dev_kfree_skb(dev->rx_sg_skb);
1736 			dev->rx_sg_skb = NULL;
1737 		} else {
1738 			memcpy(skb_tail_pointer(dev->rx_sg_skb),
1739 					 dev->rx_skb[slot]->data, len);
1740 			skb_put(dev->rx_sg_skb, len);
1741 			emac_recycle_rx_skb(dev, slot, len);
1742 			return 0;
1743 		}
1744 	}
1745 	emac_recycle_rx_skb(dev, slot, 0);
1746 	return -1;
1747 }
1748 
/* NAPI poll context */
/*
 * Main RX path (MAL poll_rx callback). Processes up to @budget BDs:
 * single-BD packets are either copied into a fresh skb (short frames,
 * below EMAC_RX_COPY_THRESH) or passed up with a replacement skb put in
 * the ring; multi-BD packets are reassembled into dev->rx_sg_skb. On
 * OOM the packet is dropped and the ring buffer recycled. If the RX
 * channel was stopped (MAL_COMMAC_RX_STOPPED) and the ring has been
 * fully drained, RX is restarted from slot 0. Returns the number of
 * BDs consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after the ownership bit was observed */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Short frames: copy out and recycle the ring skb */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb;

			copy_skb = napi_alloc_skb(&dev->mal->napi, len);
			if (unlikely(!copy_skb))
				goto oom;

			memcpy(copy_skb->data - NET_IP_ALIGN,
			       skb->data - NET_IP_ALIGN,
			       len + NET_IP_ALIGN);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Multi-BD packet: FIRST starts a new rx_sg_skb, later BDs
		 * are appended until LAST completes the frame.
		 */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped: restart it once the ring is drained */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1880 
1881 /* NAPI poll context */
1882 static int emac_peek_rx(void *param)
1883 {
1884 	struct emac_instance *dev = param;
1885 
1886 	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1887 }
1888 
1889 /* NAPI poll context */
1890 static int emac_peek_rx_sg(void *param)
1891 {
1892 	struct emac_instance *dev = param;
1893 
1894 	int slot = dev->rx_slot;
1895 	while (1) {
1896 		u16 ctrl = dev->rx_desc[slot].ctrl;
1897 		if (ctrl & MAL_RX_CTRL_EMPTY)
1898 			return 0;
1899 		else if (ctrl & MAL_RX_CTRL_LAST)
1900 			return 1;
1901 
1902 		slot = (slot + 1) % NUM_RX_BUFF;
1903 
1904 		/* I'm just being paranoid here :) */
1905 		if (unlikely(slot == dev->rx_slot))
1906 			return 0;
1907 	}
1908 }
1909 
/* Hard IRQ */
/*
 * RX descriptor error callback from the MAL: count the stall and kick
 * an asynchronous RX disable; emac_poll_rx() later restarts the channel
 * via its MAL_COMMAC_RX_STOPPED handling.
 */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1918 
/* Hard IRQ */
/*
 * EMAC error interrupt handler: read the ISR, write the value back
 * (presumably write-1-to-clear — confirm against the EMAC register
 * spec), then tally each flagged error condition into dev->estats
 * under dev->lock.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1969 
/*
 * Fold the driver's internal counters (dev->stats / dev->estats, which
 * are also updated from IRQ context, see emac_irq()) into the generic
 * net_device_stats, under dev->lock for a consistent snapshot.
 * NOTE(review): presumably wired as .ndo_get_stats — confirm against
 * the netdev_ops table (not visible in this chunk).
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &ndev->stats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
2022 
/* MAL callbacks used when RX scatter/gather (TAH) is not active */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
2029 
/* MAL callbacks for the scatter/gather RX case: identical except that
 * peek_rx waits for a complete (LAST-terminated) multi-BD frame.
 */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
2036 
/* Ethtool support */
/*
 * ethtool get_link_ksettings: report the PHY feature mask and the
 * current autoneg/speed/duplex/advertising, snapshotting the mutable
 * PHY fields under the link mutex.
 */
static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
					   struct ethtool_link_ksettings *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = dev->phy.features;
	cmd->base.port = PORT_MII;
	cmd->base.phy_address = dev->phy.address;

	mutex_lock(&dev->link_lock);
	advertising = dev->phy.advertising;
	cmd->base.autoneg = dev->phy.autoneg;
	cmd->base.speed = dev->phy.speed;
	cmd->base.duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
2062 
/* ethtool set_link_ksettings: validate the requested link parameters
 * against the PHY's feature mask and either force speed/duplex or
 * restart autonegotiation with the requested advertisement.
 *
 * Returns 0 on success, -EOPNOTSUPP in PHY-less configurations, or
 * -EINVAL for requests the PHY cannot satisfy.
 */
static int
emac_ethtool_set_link_ksettings(struct net_device *ndev,
				const struct ethtool_link_ksettings *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;
	if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the requested speed/duplex combination must
		 * be present in the PHY feature mask.
		 */
		switch (cmd->base.speed) {
		case SPEED_10:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
						cmd->base.duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		/* Advertise only modes the PHY supports, and preserve the
		 * pause/asym-pause advertisement bits currently in effect.
		 */
		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	/* Have the link timer pick up the new settings immediately */
	emac_force_link_update(dev);

	return 0;
}
2139 
2140 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2141 				       struct ethtool_ringparam *rp)
2142 {
2143 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2144 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2145 }
2146 
/* ethtool -a: report flow-control state.
 *
 * pp->autoneg is set when the PHY can autonegotiate and is currently
 * advertising some form of pause.  rx/tx pause is only reported in
 * full duplex: symmetric pause enables both directions, asymmetric
 * pause alone is reported as TX-only.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
2165 
2166 static int emac_get_regs_len(struct emac_instance *dev)
2167 {
2168 		return sizeof(struct emac_ethtool_regs_subhdr) +
2169 			sizeof(struct emac_regs);
2170 }
2171 
2172 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2173 {
2174 	struct emac_instance *dev = netdev_priv(ndev);
2175 	int size;
2176 
2177 	size = sizeof(struct emac_ethtool_regs_hdr) +
2178 		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2179 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2180 		size += zmii_get_regs_len(dev->zmii_dev);
2181 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2182 		size += rgmii_get_regs_len(dev->rgmii_dev);
2183 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2184 		size += tah_get_regs_len(dev->tah_dev);
2185 
2186 	return size;
2187 }
2188 
/* Append this EMAC's register dump (sub-header followed by a raw copy
 * of the MMIO register block) to the ethtool regs buffer at @buf.
 * Returns the next free position in the buffer.
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	/* Pick the dump-layout version matching the EMAC variant */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
	} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
	}
	/* Raw register contents immediately follow the sub-header */
	memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
	return (void *)(hdr + 1) + sizeof(struct emac_regs);
}
2204 
/* ethtool get_regs: fill @buf with a header, the MAL and EMAC register
 * dumps, and any optional bridge dumps.  hdr->components records which
 * optional blocks are present so userspace can parse the buffer; the
 * layout must match what emac_ethtool_get_regs_len() sized.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Each dump helper returns the next free position in the buffer */
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2229 
/* ethtool -r: restart autonegotiation with the current advertisement.
 *
 * Returns -EOPNOTSUPP in PHY-less configurations and -EINVAL when
 * autonegotiation is currently disabled.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	/* Kick the link timer so the new negotiation is picked up */
	emac_force_link_update(dev);
	return res;
}
2252 
2253 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2254 {
2255 	if (stringset == ETH_SS_STATS)
2256 		return EMAC_ETHTOOL_STATS_COUNT;
2257 	else
2258 		return -EINVAL;
2259 }
2260 
2261 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2262 				     u8 * buf)
2263 {
2264 	if (stringset == ETH_SS_STATS)
2265 		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2266 }
2267 
/* ethtool -S: copy dev->stats followed by dev->estats into the u64
 * output array.  The resulting layout is expected to line up with
 * emac_stats_keys (see emac_ethtool_get_strings()).
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* Advance past the first counter group, in u64 units */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2278 
2279 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2280 				     struct ethtool_drvinfo *info)
2281 {
2282 	struct emac_instance *dev = netdev_priv(ndev);
2283 
2284 	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2285 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2286 	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
2287 		 dev->cell_index, dev->ofdev->dev.of_node);
2288 }
2289 
/* ethtool entry points for the EMAC.  Ring and pause parameters are
 * read-only here (no set_ringparam/set_pauseparam handlers).
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_link_ksettings = emac_ethtool_get_link_ksettings,
	.set_link_ksettings = emac_ethtool_set_link_ksettings,
};
2309 
2310 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2311 {
2312 	struct emac_instance *dev = netdev_priv(ndev);
2313 	struct mii_ioctl_data *data = if_mii(rq);
2314 
2315 	DBG(dev, "ioctl %08x" NL, cmd);
2316 
2317 	if (dev->phy.address < 0)
2318 		return -EOPNOTSUPP;
2319 
2320 	switch (cmd) {
2321 	case SIOCGMIIPHY:
2322 		data->phy_id = dev->phy.address;
2323 		/* Fall through */
2324 	case SIOCGMIIREG:
2325 		data->val_out = emac_mdio_read(ndev, dev->phy.address,
2326 					       data->reg_num);
2327 		return 0;
2328 
2329 	case SIOCSMIIREG:
2330 		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2331 				data->val_in);
2332 		return 0;
2333 	default:
2334 		return -EOPNOTSUPP;
2335 	}
2336 }
2337 
/* Tracking state for one probe-time dependency: the phandle named in
 * the device tree, the resolved node, its platform device, and the
 * driver data that proves the dependency's driver has bound.
 */
struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct platform_device	*ofdev;
	void			*drvdata;
};

/* Indices into the dependency array used by emac_check_deps() and
 * emac_wait_deps().  PREV is the previous EMAC in the boot list.
 */
#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
2352 
/* Incrementally resolve each dependency: phandle -> node -> platform
 * device -> driver data.  Progress made on one call is kept in @deps
 * for the next, so this is safe to use as a wait_event() condition.
 * Returns true once every dependency is either absent or fully bound.
 */
static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			/* The previous boot-list entry clears itself once it
			 * has probed, which dissolves this dependency.
			 */
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2391 
/* Drop the device references taken over from emac_wait_deps().
 * of_dev_put() tolerates NULL, so absent optional deps are fine.
 */
static void emac_put_deps(struct emac_instance *dev)
{
	of_dev_put(dev->mal_dev);
	of_dev_put(dev->zmii_dev);
	of_dev_put(dev->rgmii_dev);
	of_dev_put(dev->mdio_dev);
	of_dev_put(dev->tah_dev);
}
2400 
/* Platform-bus notifier: any device binding to a driver may satisfy one
 * of our probe-time dependencies, so wake everyone waiting in
 * emac_wait_deps() to re-run emac_check_deps().
 */
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	/* We are only interested in a driver binding to a device */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2409 
/* Registered on the platform bus only for the duration of
 * emac_wait_deps().
 */
static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
2413 
/* Wait (with timeout) until every device this EMAC depends on has
 * probed.  On success the resolved platform devices are handed over to
 * dev->*_dev (their references are released later by emac_put_deps());
 * on failure all references are dropped here.  Returns 0 or -ENODEV.
 */
static int emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Any non-zero value marks the previous-EMAC dependency as
	 * present; the real node is fetched via dev->blist.
	 */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		of_node_put(deps[i].node);
		/* Keep the device refs on success; they move to dev->*_dev */
		if (err)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The previous-EMAC device is only needed for ordering, not kept */
	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2451 
/* Read a single u32 property @name from @np into @val.
 *
 * Returns 0 on success, -ENODEV if the property is missing or shorter
 * than a u32 (logging an error when @fatal).  Note the raw dereference
 * of the property value: no endianness conversion is performed, which
 * relies on the CPU being big-endian (true for PPC 4xx).
 */
static int emac_read_uint_prop(struct device_node *np, const char *name,
			       u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%pOF: missing %s property\n",
			       np, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
2466 
/* phylib adjust_link callback: mirror the phylib phy_device state into
 * the driver's own struct mii_phy (dev->phy), which the rest of the
 * driver reads for link parameters.
 */
static void emac_adjust_link(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct phy_device *phy = dev->phy_dev;

	dev->phy.autoneg = phy->autoneg;
	dev->phy.speed = phy->speed;
	dev->phy.duplex = phy->duplex;
	dev->phy.pause = phy->pause;
	dev->phy.asym_pause = phy->asym_pause;
	/* dev->phy.advertising is a legacy u32 mask; convert from the
	 * phylib link-mode bitmap.
	 */
	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
						phy->advertising);
}
2480 
2481 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2482 {
2483 	int ret = emac_mdio_read(bus->priv, addr, regnum);
2484 	/* This is a workaround for powered down ports/phys.
2485 	 * In the wild, this was seen on the Cisco Meraki MX60(W).
2486 	 * This hardware disables ports as part of the handoff
2487 	 * procedure. Accessing the ports will lead to errors
2488 	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2489 	 */
2490 	return ret < 0 ? 0xffff : ret;
2491 }
2492 
2493 static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2494 			      int regnum, u16 val)
2495 {
2496 	emac_mdio_write(bus->priv, addr, regnum, val);
2497 	return 0;
2498 }
2499 
/* mii_bus reset op: resets the whole EMAC (bus->priv is our
 * net_device), since the MDIO interface lives inside the MAC.
 */
static int emac_mii_bus_reset(struct mii_bus *bus)
{
	struct emac_instance *dev = netdev_priv(bus->priv);

	return emac_reset(dev);
}
2506 
/* Push the driver's mii_phy settings into the phylib phy_device and
 * kick off (re)negotiation.  Also used for forced mode: with
 * phy_dev->autoneg set to AUTONEG_DISABLE, phy_start_aneg() programs
 * the forced speed/duplex instead.
 */
static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
				    struct phy_device *phy_dev)
{
	phy_dev->autoneg = phy->autoneg;
	phy_dev->speed = phy->speed;
	phy_dev->duplex = phy->duplex;
	/* phylib wants a link-mode bitmap, not the legacy u32 mask */
	ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
						phy->advertising);
	return phy_start_aneg(phy_dev);
}
2517 
/* mii_phy setup_aneg op backed by phylib: record the requested
 * advertisement and start autonegotiation.
 */
static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy->autoneg = AUTONEG_ENABLE;
	phy->advertising = advertise;
	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
}
2527 
/* mii_phy setup_forced op backed by phylib: disable autonegotiation
 * and program a fixed speed/duplex (@fd is the duplex setting).
 */
static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy->autoneg = AUTONEG_DISABLE;
	phy->speed = speed;
	phy->duplex = fd;
	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
}
2538 
/* mii_phy poll_link op backed by phylib.  If the status read fails,
 * fall back to the last known carrier state instead of reporting the
 * link as down.
 */
static int emac_mdio_poll_link(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = phy_read_status(dev->phy_dev);
	if (res) {
		dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
		return ethtool_op_get_link(ndev);
	}

	return dev->phy_dev->link;
}
2553 
/* mii_phy read_link op backed by phylib: refresh the PHY status and
 * copy the negotiated speed/duplex/pause settings into @phy.
 * Returns 0 on success or the phy_read_status() error.
 */
static int emac_mdio_read_link(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);
	struct phy_device *phy_dev = dev->phy_dev;
	int res;

	res = phy_read_status(phy_dev);
	if (res)
		return res;

	phy->speed = phy_dev->speed;
	phy->duplex = phy_dev->duplex;
	phy->pause = phy_dev->pause;
	phy->asym_pause = phy_dev->asym_pause;
	return 0;
}
2571 
/* mii_phy init op backed by phylib: start the PHY state machine and
 * (re)initialize the PHY hardware.
 */
static int emac_mdio_init_phy(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy_start(dev->phy_dev);
	return phy_init_hw(dev->phy_dev);
}
2580 
/* mii_phy ops that shim the driver's legacy PHY layer onto phylib,
 * used for device-tree described PHYs (see emac_dt_phy_connect()).
 */
static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
	.init		= emac_mdio_init_phy,
	.setup_aneg	= emac_mdio_setup_aneg,
	.setup_forced	= emac_mdio_setup_forced,
	.poll_link	= emac_mdio_poll_link,
	.read_link	= emac_mdio_read_link,
};
2588 
/* Allocate and register an MDIO bus described by the "mdio" child node
 * of this EMAC's device-tree node.  The bus structure is devm-managed;
 * registration itself must be undone by the caller on later failure
 * (see emac_dt_phy_probe()).  Returns 0 or a negative errno.
 */
static int emac_dt_mdio_probe(struct emac_instance *dev)
{
	struct device_node *mii_np;
	int res;

	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
	if (!mii_np) {
		dev_err(&dev->ofdev->dev, "no mdio definition found.");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		res = -ENODEV;
		goto put_node;
	}

	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
	if (!dev->mii_bus) {
		res = -ENOMEM;
		goto put_node;
	}

	/* bus->priv carries our net_device for the read/write/reset ops */
	dev->mii_bus->priv = dev->ndev;
	dev->mii_bus->parent = dev->ndev->dev.parent;
	dev->mii_bus->name = "emac_mdio";
	dev->mii_bus->read = &emac_mii_bus_read;
	dev->mii_bus->write = &emac_mii_bus_write;
	dev->mii_bus->reset = &emac_mii_bus_reset;
	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
	res = of_mdiobus_register(dev->mii_bus, mii_np);
	if (res) {
		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
			dev->mii_bus->name, res);
	}

 put_node:
	of_node_put(mii_np);
	return res;
}
2628 
/* Connect to the PHY referenced by @phy_handle via phylib and populate
 * a fresh mii_phy_def from the bound PHY driver, routing the legacy PHY
 * ops through the emac_dt_mdio_phy_ops shims.
 *
 * NOTE(review): dev->phy_dev->drv is dereferenced unconditionally — this
 * appears to assume a specific PHY driver is bound (not the generic
 * driverless case); confirm of_phy_connect() guarantees this here.
 */
static int emac_dt_phy_connect(struct emac_instance *dev,
			       struct device_node *phy_handle)
{
	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
				    GFP_KERNEL);
	if (!dev->phy.def)
		return -ENOMEM;

	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
				      0, dev->phy_mode);
	if (!dev->phy_dev) {
		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
		return -ENODEV;
	}

	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
	dev->phy.def->name = dev->phy_dev->drv->name;
	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
						dev->phy_dev->supported);
	dev->phy.address = dev->phy_dev->mdio.addr;
	dev->phy.mode = dev->phy_dev->interface;
	return 0;
}
2654 
/* Try to attach a device-tree described PHY.
 *
 * Returns 1 when no "phy-handle" property exists (caller falls back to
 * the legacy PHY scan), 0 on success, or a negative errno on failure
 * (in which case the just-registered MDIO bus is unregistered again).
 */
static int emac_dt_phy_probe(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct device_node *phy_handle;
	int res = 1;

	phy_handle = of_parse_phandle(np, "phy-handle", 0);

	if (phy_handle) {
		res = emac_dt_mdio_probe(dev);
		if (!res) {
			res = emac_dt_phy_connect(dev, phy_handle);
			if (res)
				mdiobus_unregister(dev->mii_bus);
		}
	}

	/* of_node_put() tolerates NULL when the property was absent */
	of_node_put(phy_handle);
	return res;
}
2675 
/* Locate and initialize this EMAC's PHY.
 *
 * Handles, in order: PHY-less/fixed-link configurations, device-tree
 * described PHYs (phylib, RGMII only), and finally the legacy scan of
 * the MDIO address space guided by phy-map/phy-address.  On success,
 * dev->phy is fully populated and initial link parameters are set.
 */
static int emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration. */
	if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
	    of_phy_is_fixed_link(np)) {
		emac_reset(dev);

		/* Fake a feature set: MII plus the best full-duplex speed
		 * the configured interface mode can carry.
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		if (of_phy_is_fixed_link(np)) {
			int res = emac_dt_mdio_probe(dev);

			if (res)
				return res;

			res = of_phy_register_fixed_link(np);
			dev->phy_dev = of_phy_find_device(np);
			if (res || !dev->phy_dev) {
				mdiobus_unregister(dev->mii_bus);
				return res ? res : -EINVAL;
			}
			emac_adjust_link(dev->ndev);
			/* Drop the reference of_phy_find_device() took */
			put_device(&dev->phy_dev->mdio.dev);
		}
		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	/* Addresses already claimed by other EMACs or masked out by the
	 * device tree are skipped during the scan below.
	 */
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX
	 * NOTE(review): this repeats the identical dcri_clrset() above;
	 * presumably re-asserted in case emac_rx_clk_tx() disturbed the
	 * setting — confirm before removing either call.
	 */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		int res = emac_dt_phy_probe(dev);

		switch (res) {
		case 1:
			/* No phy-handle property configured.
			 * Continue with the existing phy probe
			 * and setup code.
			 */
			break;

		case 0:
			mutex_unlock(&emac_phy_map_lock);
			goto init_phy;

		default:
			mutex_unlock(&emac_phy_map_lock);
			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
				res);
			return res;
		}
	}

	/* An explicit phy-address limits the scan to that one address */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	/* i == 0x20 means the scan loop exhausted all 32 addresses */
	if (i == 0x20) {
		printk(KERN_WARNING "%pOF: can't find PHY!\n", np);
		return -ENXIO;
	}

 init_phy:
	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;
	dev->phy.features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2847 
/* Parse this EMAC's device-tree node into the emac_instance: channel
 * and bridge phandles, FIFO sizes, PHY addressing, feature bits derived
 * from compatible strings, and the MAC address.  Mandatory properties
 * (fatal=1 reads, local-mac-address) cause -ENXIO; optional ones get
 * defaults.
 */
static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;
	int err;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = ETH_DATA_LEN;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	/* Gige FIFO sizes default to the regular FIFO sizes */
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	/* 0xffffffff means "not specified" for the PHY addressing props */
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	/* OPB bus frequency comes from the parent bus node */
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	err = of_get_phy_mode(np, &dev->phy_mode);
	if (err)
		dev->phy_mode = PHY_INTERFACE_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%pOF: Flow control not disabled!\n",
					np);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found; each requires its
	 * driver to be compiled in, otherwise the probe must fail.
	 */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%pOF: TAH support not enabled !\n", np);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%pOF: ZMII support not enabled !\n", np);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%pOF: RGMII support not enabled !\n", np);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
		       np);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	/* This should never happen */
	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
		return -ENXIO;

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
3008 
/* net_device callbacks for the basic configuration: plain (non-SG)
 * transmit, and no ndo_change_mtu so the MTU stays at its default.
 */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= emac_set_mac_address,
	.ndo_start_xmit		= emac_start_xmit,
};
3020 
/* net_device callbacks for the gige-capable configuration: differs from
 * emac_netdev_ops in using the scatter/gather transmit path and in
 * supporting MTU changes via emac_change_mtu.
 */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= emac_set_mac_address,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
3033 
/*
 * emac_probe - probe one EMAC device-tree node and register a net_device
 *
 * Runs the full bring-up sequence: config parsing from the device tree,
 * IRQ and register mapping, waiting for dependent MAL/MDIO/bridge devices,
 * MAL channel registration, optional ZMII/RGMII/TAH attachment, PHY
 * discovery, and finally net_device registration.  Every step has a
 * matching unwind label at the bottom; on any failure the node is also
 * removed from the boot list and all boot-list waiters are woken so they
 * do not block forever on a device that will never appear.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int emac_probe(struct platform_device *ofdev)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACS.  We leave the check for an unused
	 * property here for now, but new flat device trees should set a
	 * status property to "disabled" instead.
	 */
	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev)
		goto err_gone;

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (!dev->emac_irq) {
		printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
		err = -ENODEV;
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	// TODO : platform_get_resource() and devm_ioremap_resource()
	dev->emacp = of_iomap(np, 0);
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%pOF: Timeout waiting for dependent devices\n", np);
		/*  display more info about what's missing ? */
		goto err_reg_unmap;
	}
	/* Dependencies have probed by now; grab their driver data */
	dev->mal = platform_get_drvdata(dev->mal_dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n",
		       np, dev->mal_dev->dev.of_node);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Some SoCs like APM821xx does not support Half Duplex mode. */
	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_100baseT_Half |
				     SUPPORTED_10baseT_Half);
	}

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* TAH provides checksum offload; advertise it when present */
	if (dev->tah_dev) {
		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
	}
	ndev->watchdog_timeo = 5 * HZ;
	/* Gige-capable PHYs get the SG-aware xmit/commac ops */
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->netdev_ops = &emac_gige_netdev_ops;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &emac_ethtool_ops;

	/* MTU range: 46 - 1500 or whatever is in OF */
	ndev->min_mtu = EMAC_MIN_MTU;
	ndev->max_mtu = dev->max_mtu;

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
		       np, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	platform_set_drvdata(ofdev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);


	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
	       ndev->name, dev->cell_index, np, ndev->dev_addr);

	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

	/* Unwind in strict reverse order of the setup steps above */
 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	free_netdev(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
3255 
/*
 * emac_remove - unbind the driver from an EMAC device
 *
 * Tears down everything emac_probe() set up, in reverse order: the
 * net_device is unregistered first so no new traffic or reset work can
 * arrive, then the bridge devices are detached, the PHY released, and
 * finally the MAL registration, dependency references, register mapping
 * and IRQ mappings are undone.  Always returns 0.
 */
static int emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = platform_get_drvdata(ofdev);

	DBG(dev, "remove" NL);

	/* Stop the stack from using us before dismantling anything */
	unregister_netdev(dev->ndev);

	/* Make sure a queued emac_reset_work is not still running */
	cancel_work_sync(&dev->reset_work);

	/* Detach from the optional bridge devices (mirrors probe's attaches) */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	if (dev->phy_dev)
		phy_disconnect(dev->phy_dev);

	if (dev->mii_bus)
		mdiobus_unregister(dev->mii_bus);

	/* Release the PHY address bit this instance held (claimed during
	 * PHY setup elsewhere in this file) */
	busy_phy_map &= ~(1 << dev->phy.address);
	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	iounmap(dev->emacp);

	if (dev->wol_irq)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
3296 
/* XXX Features in here should be replaced by properties... */
/*
 * Device-tree match table: the three EMAC generations this driver binds
 * to ("ibm,emac", "ibm,emac4", "ibm,emac4sync").  Also consulted by
 * emac_make_bootlist() when scanning the flattened device tree.
 */
static const struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
3315 
/* Platform driver glue: binds emac_probe()/emac_remove() to the nodes
 * matched by emac_match above. */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3324 
3325 static void __init emac_make_bootlist(void)
3326 {
3327 	struct device_node *np = NULL;
3328 	int j, max, i = 0;
3329 	int cell_indices[EMAC_BOOT_LIST_SIZE];
3330 
3331 	/* Collect EMACs */
3332 	while((np = of_find_all_nodes(np)) != NULL) {
3333 		const u32 *idx;
3334 
3335 		if (of_match_node(emac_match, np) == NULL)
3336 			continue;
3337 		if (of_get_property(np, "unused", NULL))
3338 			continue;
3339 		idx = of_get_property(np, "cell-index", NULL);
3340 		if (idx == NULL)
3341 			continue;
3342 		cell_indices[i] = *idx;
3343 		emac_boot_list[i++] = of_node_get(np);
3344 		if (i >= EMAC_BOOT_LIST_SIZE) {
3345 			of_node_put(np);
3346 			break;
3347 		}
3348 	}
3349 	max = i;
3350 
3351 	/* Bubble sort them (doh, what a creative algorithm :-) */
3352 	for (i = 0; max > 1 && (i < (max - 1)); i++)
3353 		for (j = i; j < max; j++) {
3354 			if (cell_indices[i] > cell_indices[j]) {
3355 				swap(emac_boot_list[i], emac_boot_list[j]);
3356 				swap(cell_indices[i], cell_indices[j]);
3357 			}
3358 		}
3359 }
3360 
3361 static int __init emac_init(void)
3362 {
3363 	int rc;
3364 
3365 	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3366 
3367 	/* Build EMAC boot list */
3368 	emac_make_bootlist();
3369 
3370 	/* Init submodules */
3371 	rc = mal_init();
3372 	if (rc)
3373 		goto err;
3374 	rc = zmii_init();
3375 	if (rc)
3376 		goto err_mal;
3377 	rc = rgmii_init();
3378 	if (rc)
3379 		goto err_zmii;
3380 	rc = tah_init();
3381 	if (rc)
3382 		goto err_rgmii;
3383 	rc = platform_driver_register(&emac_driver);
3384 	if (rc)
3385 		goto err_tah;
3386 
3387 	return 0;
3388 
3389  err_tah:
3390 	tah_exit();
3391  err_rgmii:
3392 	rgmii_exit();
3393  err_zmii:
3394 	zmii_exit();
3395  err_mal:
3396 	mal_exit();
3397  err:
3398 	return rc;
3399 }
3400 
3401 static void __exit emac_exit(void)
3402 {
3403 	int i;
3404 
3405 	platform_driver_unregister(&emac_driver);
3406 
3407 	tah_exit();
3408 	rgmii_exit();
3409 	zmii_exit();
3410 	mal_exit();
3411 
3412 	/* Destroy EMAC boot list */
3413 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3414 		of_node_put(emac_boot_list[i]);
3415 }
3416 
/* Standard module hookup: emac_init()/emac_exit() defined above */
module_init(emac_init);
module_exit(emac_exit);
3419