/*
 * drivers/net/ethernet/ibm/emac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

/*
 * The lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed from the network stack can be split
 * into several BDs, dma_map_single/dma_map_page can be used to map a
 * particular BD), maintaining such information adds additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency; the dma_unmap_???? routines are empty and are likely to stay
 * this way. I decided to omit the dma_unmap_???? calls because I don't want
 * to add additional complexity just for the sake of following some abstract
 * API, when it doesn't add any real benefit to the driver. I understand that
 * this decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code
 * I liked :(.                                                          --ebs
 */


#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
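/* With the usual Kconfig default of NUM_TX_BUFF = 64 (CONFIG_IBM_EMAC_TXB),
 * this wakes the queue once at least 16 descriptors are free; treat the
 * exact numbers as configuration-dependent.
 */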

/* If the packet size is less than this number, we allocate a small skb and
 * copy the packet contents into it instead of just sending the original big
 * skb up.
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs), though in that
 * case we can probably require explicit PHY IDs in the device-tree.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probing; that
 * is, discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error);
}

/* EMAC PHY clock workaround:
 * The 440EP/440GR has a saner SDR0_MFR register implementation than the
 * 440GX; it allows controlling each EMAC clock individually.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)
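/* In jiffies: poll once per second while the link is up, five times per
 * second while it is down.
 */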

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
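/* These appear to be roughly one maximum-length frame time, rounded up:
 * e.g. 1538 byte times (1500 payload + 18 header/FCS + 8 preamble/SFD +
 * 12 IFG) at 10 Mb/s is ~1230 us, and a ~9000-byte jumbo frame at 1 Gb/s
 * is ~73 us.
 */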

static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
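/* 01:80:C2:00:00:01 is the reserved IEEE 802.3 destination address for MAC
 * control (PAUSE) frames; it is added in emac_open() so that flow-control
 * frames pass the RX filter.
 */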

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_interface_mode_is_rgmii(phy_mode) ||
		phy_mode == PHY_INTERFACE_MODE_GMII ||
		phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
	bool __maybe_unused try_internal_clock = false;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* A 40x erratum suggests stopping the RX channel before
		 * reset; we stop TX as well.
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
	/*
	 * The PPC460EX/GT Embedded Processor Advanced User's Manual,
	 * section 28.10.1 Mode Register 0 (EMACx_MR0), states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 *
	 * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if
	 * the ethernet cable is not attached. This causes the reset to time
	 * out and the PHY detection code in emac_init_phy() is unable to
	 * communicate with and detect the AR8035-A PHY. As a result, the
	 * emac driver bails out early and the user has no ethernet.
	 * In order to stay compatible with existing configurations, the
	 * driver will temporarily switch to the internal clock, after
	 * the first reset fails.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

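	/* EMAC_MR0_SRST is expected to self-clear once the soft reset
	 * completes (which requires a TX clock, hence the dance above).
	 */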
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (!n && !try_internal_clock) {
			/* first attempt has timed out. */
			n = 20;
			try_internal_clock = true;
			goto do_retry;
		}

		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}


static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof(gaht_temp));

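	/* Each multicast address is hashed by CRC into one of the
	 * EMAC_XAHT_SLOTS(dev) hash slots, which map onto the GAHT
	 * register/bit pair programmed below.
	 */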
	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

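	/* Base mode: strip padding/FCS and accept our individual and the
	 * broadcast address (SP/SFCS/IAE/BAE, going by the bit names).
	 */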
	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		r &= ~EMAC4_RMR_MJS_MASK;
		r |= EMAC4_RMR_MJS(ndev->mtu);
	}

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch (tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch (tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_TFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_RFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
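	/* TRTR encodes the threshold in 64-byte units, minus one: e.g. a
	 * 1024-byte request becomes (1024 >> 6) - 1 = 15 in the field.
	 */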
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
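	/* Pack the low/high water marks into RWMR; EMAC4 appears to use
	 * wider (10-bit) mark fields than the original core (9-bit).
	 */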
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}

static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us to NOT use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100 Mb/s, without preamble and
	   inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						      EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}

static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}


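/* The STACR "operation complete" bit normally reads 1 when the MDIO
 * transaction is done; some cores invert its sense, hence
 * EMAC_FTR_STACR_OC_INVERT below.
 */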
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read((dev->mdio_instance &&
				dev->phy.gpcs_address != id) ?
				dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write((dev->mdio_instance &&
			   dev->phy.gpcs_address != id) ?
			   dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}

/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs.                                             --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}

static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over the RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
				   rx_sync_size, DMA_FROM_DEVICE)
				   + NET_IP_ALIGN;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting the RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static int
__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
{
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			   dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}

static int
emac_alloc_rx_skb(struct emac_instance *dev, int slot)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
					  GFP_KERNEL);

	return __emac_prepare_rx_skb(skb, dev, slot);
}

static int
emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);

	return __emac_prepare_rx_skb(skb, dev, slot);
}

static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}

/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}

static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}

static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If this conditional makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead.
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	netif_trans_update(ndev);
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}

/* Tx lock BH */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
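	/* Split one DMA-contiguous region into as many extra descriptors
	 * as needed, MAL_MAX_TX_SIZE bytes at a time, marking LAST/WRAP
	 * on the appropriate slots.
	 */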
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static netdev_tx_t
emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is the common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note: this is only an *estimate*; we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks.
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}

/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}

static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			       SKB_DATA_ALIGN(len + NET_IP_ALIGN),
			       DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1753 
1754 /* NAPI poll context */
1755 static int emac_poll_rx(void *param, int budget)
1756 {
1757 	struct emac_instance *dev = param;
1758 	int slot = dev->rx_slot, received = 0;
1759 
1760 	DBG2(dev, "poll_rx(%d)" NL, budget);
1761 
1762  again:
1763 	while (budget > 0) {
1764 		int len;
1765 		struct sk_buff *skb;
1766 		u16 ctrl = dev->rx_desc[slot].ctrl;
1767 
1768 		if (ctrl & MAL_RX_CTRL_EMPTY)
1769 			break;
1770 
1771 		skb = dev->rx_skb[slot];
1772 		mb();
1773 		len = dev->rx_desc[slot].data_len;
1774 
1775 		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1776 			goto sg;
1777 
1778 		ctrl &= EMAC_BAD_RX_MASK;
1779 		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1780 			emac_parse_rx_error(dev, ctrl);
1781 			++dev->estats.rx_dropped_error;
1782 			emac_recycle_rx_skb(dev, slot, 0);
1783 			len = 0;
1784 			goto next;
1785 		}
1786 
1787 		if (len < ETH_HLEN) {
1788 			++dev->estats.rx_dropped_stack;
1789 			emac_recycle_rx_skb(dev, slot, len);
1790 			goto next;
1791 		}
1792 
1793 		if (len && len < EMAC_RX_COPY_THRESH) {
1794 			struct sk_buff *copy_skb;
1795 
1796 			copy_skb = napi_alloc_skb(&dev->mal->napi, len);
1797 			if (unlikely(!copy_skb))
1798 				goto oom;
1799 
1800 			memcpy(copy_skb->data - NET_IP_ALIGN,
1801 			       skb->data - NET_IP_ALIGN,
1802 			       len + NET_IP_ALIGN);
1803 			emac_recycle_rx_skb(dev, slot, len);
1804 			skb = copy_skb;
1805 		} else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
1806 			goto oom;
1807 
1808 		skb_put(skb, len);
1809 	push_packet:
1810 		skb->protocol = eth_type_trans(skb, dev->ndev);
1811 		emac_rx_csum(dev, skb, ctrl);
1812 
1813 		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1814 			++dev->estats.rx_dropped_stack;
1815 	next:
1816 		++dev->stats.rx_packets;
1817 	skip:
1818 		dev->stats.rx_bytes += len;
1819 		slot = (slot + 1) % NUM_RX_BUFF;
1820 		--budget;
1821 		++received;
1822 		continue;
1823 	sg:
1824 		if (ctrl & MAL_RX_CTRL_FIRST) {
1825 			BUG_ON(dev->rx_sg_skb);
1826 			if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
1827 				DBG(dev, "rx OOM %d" NL, slot);
1828 				++dev->estats.rx_dropped_oom;
1829 				emac_recycle_rx_skb(dev, slot, 0);
1830 			} else {
1831 				dev->rx_sg_skb = skb;
1832 				skb_put(skb, len);
1833 			}
1834 		} else if (!emac_rx_sg_append(dev, slot) &&
1835 			   (ctrl & MAL_RX_CTRL_LAST)) {
1836 
1837 			skb = dev->rx_sg_skb;
1838 			dev->rx_sg_skb = NULL;
1839 
1840 			ctrl &= EMAC_BAD_RX_MASK;
1841 			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1842 				emac_parse_rx_error(dev, ctrl);
1843 				++dev->estats.rx_dropped_error;
1844 				dev_kfree_skb(skb);
1845 				len = 0;
1846 			} else
1847 				goto push_packet;
1848 		}
1849 		goto skip;
1850 	oom:
1851 		DBG(dev, "rx OOM %d" NL, slot);
1852 		/* Drop the packet and recycle skb */
1853 		++dev->estats.rx_dropped_oom;
1854 		emac_recycle_rx_skb(dev, slot, 0);
1855 		goto next;
1856 	}
1857 
1858 	if (received) {
1859 		DBG2(dev, "rx %d BDs" NL, received);
1860 		dev->rx_slot = slot;
1861 	}
1862 
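	/*
	 * If the channel was stopped (see emac_rxde()) and we still have
	 * budget, give the ring one more pass; then drop any half-built
	 * scatter-gather packet and restart the channel from slot 0.
	 */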
1863 	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1864 		mb();
1865 		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1866 			DBG2(dev, "rx restart" NL);
1867 			received = 0;
1868 			goto again;
1869 		}
1870 
1871 		if (dev->rx_sg_skb) {
1872 			DBG2(dev, "dropping partial rx packet" NL);
1873 			++dev->estats.rx_dropped_error;
1874 			dev_kfree_skb(dev->rx_sg_skb);
1875 			dev->rx_sg_skb = NULL;
1876 		}
1877 
1878 		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1879 		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1880 		emac_rx_enable(dev);
1881 		dev->rx_slot = 0;
1882 	}
1883 	return received;
1884 }
1885 
1886 /* NAPI poll context */
1887 static int emac_peek_rx(void *param)
1888 {
1889 	struct emac_instance *dev = param;
1890 
1891 	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1892 }
1893 
1894 /* NAPI poll context */
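/*
 * Like emac_peek_rx(), but only reports work when a *complete* frame,
 * i.e. one terminated by a LAST descriptor, is available.
 */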
1895 static int emac_peek_rx_sg(void *param)
1896 {
1897 	struct emac_instance *dev = param;
1898 
1899 	int slot = dev->rx_slot;
1900 	while (1) {
1901 		u16 ctrl = dev->rx_desc[slot].ctrl;
1902 		if (ctrl & MAL_RX_CTRL_EMPTY)
1903 			return 0;
1904 		else if (ctrl & MAL_RX_CTRL_LAST)
1905 			return 1;
1906 
1907 		slot = (slot + 1) % NUM_RX_BUFF;
1908 
1909 		/* I'm just being paranoid here :) */
1910 		if (unlikely(slot == dev->rx_slot))
1911 			return 0;
1912 	}
1913 }
1914 
1915 /* Hard IRQ */
1916 static void emac_rxde(void *param)
1917 {
1918 	struct emac_instance *dev = param;
1919 
1920 	++dev->estats.rx_stopped;
1921 	emac_rx_disable_async(dev);
1922 }
1923 
1924 /* Hard IRQ */
1925 static irqreturn_t emac_irq(int irq, void *dev_instance)
1926 {
1927 	struct emac_instance *dev = dev_instance;
1928 	struct emac_regs __iomem *p = dev->emacp;
1929 	struct emac_error_stats *st = &dev->estats;
1930 	u32 isr;
1931 
1932 	spin_lock(&dev->lock);
1933 
1934 	isr = in_be32(&p->isr);
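	/* ack the sources we just read; ISR bits are cleared on write-back */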
1935 	out_be32(&p->isr, isr);
1936 
1937 	DBG(dev, "isr = %08x" NL, isr);
1938 
1939 	if (isr & EMAC4_ISR_TXPE)
1940 		++st->tx_parity;
1941 	if (isr & EMAC4_ISR_RXPE)
1942 		++st->rx_parity;
1943 	if (isr & EMAC4_ISR_TXUE)
1944 		++st->tx_underrun;
1945 	if (isr & EMAC4_ISR_RXOE)
1946 		++st->rx_fifo_overrun;
1947 	if (isr & EMAC_ISR_OVR)
1948 		++st->rx_overrun;
1949 	if (isr & EMAC_ISR_BP)
1950 		++st->rx_bad_packet;
1951 	if (isr & EMAC_ISR_RP)
1952 		++st->rx_runt_packet;
1953 	if (isr & EMAC_ISR_SE)
1954 		++st->rx_short_event;
1955 	if (isr & EMAC_ISR_ALE)
1956 		++st->rx_alignment_error;
1957 	if (isr & EMAC_ISR_BFCS)
1958 		++st->rx_bad_fcs;
1959 	if (isr & EMAC_ISR_PTLE)
1960 		++st->rx_packet_too_long;
1961 	if (isr & EMAC_ISR_ORE)
1962 		++st->rx_out_of_range;
1963 	if (isr & EMAC_ISR_IRE)
1964 		++st->rx_in_range;
1965 	if (isr & EMAC_ISR_SQE)
1966 		++st->tx_sqe;
1967 	if (isr & EMAC_ISR_TE)
1968 		++st->tx_errors;
1969 
1970 	spin_unlock(&dev->lock);
1971 
1972 	return IRQ_HANDLED;
1973 }
1974 
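/*
 * Fold the driver's 64-bit software counters (dev->stats / dev->estats)
 * into the legacy unsigned long net_device_stats, under dev->lock so we
 * don't race with the updates made from emac_irq().
 */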
1975 static struct net_device_stats *emac_stats(struct net_device *ndev)
1976 {
1977 	struct emac_instance *dev = netdev_priv(ndev);
1978 	struct emac_stats *st = &dev->stats;
1979 	struct emac_error_stats *est = &dev->estats;
1980 	struct net_device_stats *nst = &ndev->stats;
1981 	unsigned long flags;
1982 
1983 	DBG2(dev, "stats" NL);
1984 
1985 	/* Compute "legacy" statistics */
1986 	spin_lock_irqsave(&dev->lock, flags);
1987 	nst->rx_packets = (unsigned long)st->rx_packets;
1988 	nst->rx_bytes = (unsigned long)st->rx_bytes;
1989 	nst->tx_packets = (unsigned long)st->tx_packets;
1990 	nst->tx_bytes = (unsigned long)st->tx_bytes;
1991 	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1992 					  est->rx_dropped_error +
1993 					  est->rx_dropped_resize +
1994 					  est->rx_dropped_mtu);
1995 	nst->tx_dropped = (unsigned long)est->tx_dropped;
1996 
1997 	nst->rx_errors = (unsigned long)est->rx_bd_errors;
1998 	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1999 					      est->rx_fifo_overrun +
2000 					      est->rx_overrun);
2001 	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
2002 					       est->rx_alignment_error);
2003 	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
2004 					     est->rx_bad_fcs);
2005 	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
2006 						est->rx_bd_short_event +
2007 						est->rx_bd_packet_too_long +
2008 						est->rx_bd_out_of_range +
2009 						est->rx_bd_in_range +
2010 						est->rx_runt_packet +
2011 						est->rx_short_event +
2012 						est->rx_packet_too_long +
2013 						est->rx_out_of_range +
2014 						est->rx_in_range);
2015 
2016 	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
2017 	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
2018 					      est->tx_underrun);
2019 	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
2020 	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
2021 					  est->tx_bd_excessive_collisions +
2022 					  est->tx_bd_late_collision +
2023 					  est->tx_bd_multple_collisions);
2024 	spin_unlock_irqrestore(&dev->lock, flags);
2025 	return nst;
2026 }
2027 
2028 static struct mal_commac_ops emac_commac_ops = {
2029 	.poll_tx = &emac_poll_tx,
2030 	.poll_rx = &emac_poll_rx,
2031 	.peek_rx = &emac_peek_rx,
2032 	.rxde = &emac_rxde,
2033 };
2034 
2035 static struct mal_commac_ops emac_commac_sg_ops = {
2036 	.poll_tx = &emac_poll_tx,
2037 	.poll_rx = &emac_poll_rx,
2038 	.peek_rx = &emac_peek_rx_sg,
2039 	.rxde = &emac_rxde,
2040 };
2041 
2042 /* Ethtool support */
2043 static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
2044 					   struct ethtool_link_ksettings *cmd)
2045 {
2046 	struct emac_instance *dev = netdev_priv(ndev);
2047 	u32 supported, advertising;
2048 
2049 	supported = dev->phy.features;
2050 	cmd->base.port = PORT_MII;
2051 	cmd->base.phy_address = dev->phy.address;
2052 
2053 	mutex_lock(&dev->link_lock);
2054 	advertising = dev->phy.advertising;
2055 	cmd->base.autoneg = dev->phy.autoneg;
2056 	cmd->base.speed = dev->phy.speed;
2057 	cmd->base.duplex = dev->phy.duplex;
2058 	mutex_unlock(&dev->link_lock);
2059 
2060 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2061 						supported);
2062 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2063 						advertising);
2064 
2065 	return 0;
2066 }
2067 
2068 static int
2069 emac_ethtool_set_link_ksettings(struct net_device *ndev,
2070 				const struct ethtool_link_ksettings *cmd)
2071 {
2072 	struct emac_instance *dev = netdev_priv(ndev);
2073 	u32 f = dev->phy.features;
2074 	u32 advertising;
2075 
2076 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
2077 						cmd->link_modes.advertising);
2078 
2079 	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2080 	    cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
2081 
2082 	/* Basic sanity checks */
2083 	if (dev->phy.address < 0)
2084 		return -EOPNOTSUPP;
2085 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
2086 	    cmd->base.autoneg != AUTONEG_DISABLE)
2087 		return -EINVAL;
2088 	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
2089 		return -EINVAL;
2090 	if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
2091 		return -EINVAL;
2092 
2093 	if (cmd->base.autoneg == AUTONEG_DISABLE) {
2094 		switch (cmd->base.speed) {
2095 		case SPEED_10:
2096 			if (cmd->base.duplex == DUPLEX_HALF &&
2097 			    !(f & SUPPORTED_10baseT_Half))
2098 				return -EINVAL;
2099 			if (cmd->base.duplex == DUPLEX_FULL &&
2100 			    !(f & SUPPORTED_10baseT_Full))
2101 				return -EINVAL;
2102 			break;
2103 		case SPEED_100:
2104 			if (cmd->base.duplex == DUPLEX_HALF &&
2105 			    !(f & SUPPORTED_100baseT_Half))
2106 				return -EINVAL;
2107 			if (cmd->base.duplex == DUPLEX_FULL &&
2108 			    !(f & SUPPORTED_100baseT_Full))
2109 				return -EINVAL;
2110 			break;
2111 		case SPEED_1000:
2112 			if (cmd->base.duplex == DUPLEX_HALF &&
2113 			    !(f & SUPPORTED_1000baseT_Half))
2114 				return -EINVAL;
2115 			if (cmd->base.duplex == DUPLEX_FULL &&
2116 			    !(f & SUPPORTED_1000baseT_Full))
2117 				return -EINVAL;
2118 			break;
2119 		default:
2120 			return -EINVAL;
2121 		}
2122 
2123 		mutex_lock(&dev->link_lock);
2124 		dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
2125 						cmd->base.duplex);
2126 		mutex_unlock(&dev->link_lock);
2127 
2128 	} else {
2129 		if (!(f & SUPPORTED_Autoneg))
2130 			return -EINVAL;
2131 
2132 		mutex_lock(&dev->link_lock);
2133 		dev->phy.def->ops->setup_aneg(&dev->phy,
2134 					      (advertising & f) |
2135 					      (dev->phy.advertising &
2136 					       (ADVERTISED_Pause |
2137 						ADVERTISED_Asym_Pause)));
2138 		mutex_unlock(&dev->link_lock);
2139 	}
2140 	emac_force_link_update(dev);
2141 
2142 	return 0;
2143 }
2144 
2145 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2146 				       struct ethtool_ringparam *rp)
2147 {
2148 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2149 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2150 }
2151 
2152 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2153 					struct ethtool_pauseparam *pp)
2154 {
2155 	struct emac_instance *dev = netdev_priv(ndev);
2156 
2157 	mutex_lock(&dev->link_lock);
2158 	if ((dev->phy.features & SUPPORTED_Autoneg) &&
2159 	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2160 		pp->autoneg = 1;
2161 
2162 	if (dev->phy.duplex == DUPLEX_FULL) {
2163 		if (dev->phy.pause)
2164 			pp->rx_pause = pp->tx_pause = 1;
2165 		else if (dev->phy.asym_pause)
2166 			pp->tx_pause = 1;
2167 	}
2168 	mutex_unlock(&dev->link_lock);
2169 }
2170 
2171 static int emac_get_regs_len(struct emac_instance *dev)
2172 {
2173 	return sizeof(struct emac_ethtool_regs_subhdr) +
2174 	       sizeof(struct emac_regs);
2175 }
2176 
2177 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2178 {
2179 	struct emac_instance *dev = netdev_priv(ndev);
2180 	int size;
2181 
2182 	size = sizeof(struct emac_ethtool_regs_hdr) +
2183 		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2184 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2185 		size += zmii_get_regs_len(dev->zmii_dev);
2186 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2187 		size += rgmii_get_regs_len(dev->rgmii_dev);
2188 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2189 		size += tah_get_regs_len(dev->tah_dev);
2190 
2191 	return size;
2192 }
2193 
2194 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2195 {
2196 	struct emac_ethtool_regs_subhdr *hdr = buf;
2197 
2198 	hdr->index = dev->cell_index;
2199 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2200 		hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2201 	} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2202 		hdr->version = EMAC4_ETHTOOL_REGS_VER;
2203 	} else {
2204 		hdr->version = EMAC_ETHTOOL_REGS_VER;
2205 	}
2206 	memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2207 	return (void *)(hdr + 1) + sizeof(struct emac_regs);
2208 }
2209 
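/*
 * Dump layout: an emac_ethtool_regs_hdr carrying a bitmask of the
 * optional components, followed by the MAL, EMAC and (when present)
 * ZMII/RGMII/TAH register dumps, each helper presumably prefixing its
 * own subheader the way emac_dump_regs() does above.
 */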
2210 static void emac_ethtool_get_regs(struct net_device *ndev,
2211 				  struct ethtool_regs *regs, void *buf)
2212 {
2213 	struct emac_instance *dev = netdev_priv(ndev);
2214 	struct emac_ethtool_regs_hdr *hdr = buf;
2215 
2216 	hdr->components = 0;
2217 	buf = hdr + 1;
2218 
2219 	buf = mal_dump_regs(dev->mal, buf);
2220 	buf = emac_dump_regs(dev, buf);
2221 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2222 		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2223 		buf = zmii_dump_regs(dev->zmii_dev, buf);
2224 	}
2225 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2226 		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2227 		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2228 	}
2229 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2230 		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2231 		buf = tah_dump_regs(dev->tah_dev, buf);
2232 	}
2233 }
2234 
2235 static int emac_ethtool_nway_reset(struct net_device *ndev)
2236 {
2237 	struct emac_instance *dev = netdev_priv(ndev);
2238 	int res = 0;
2239 
2240 	DBG(dev, "nway_reset" NL);
2241 
2242 	if (dev->phy.address < 0)
2243 		return -EOPNOTSUPP;
2244 
2245 	mutex_lock(&dev->link_lock);
2246 	if (!dev->phy.autoneg) {
2247 		res = -EINVAL;
2248 		goto out;
2249 	}
2250 
2251 	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2252  out:
2253 	mutex_unlock(&dev->link_lock);
2254 	emac_force_link_update(dev);
2255 	return res;
2256 }
2257 
2258 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2259 {
2260 	if (stringset == ETH_SS_STATS)
2261 		return EMAC_ETHTOOL_STATS_COUNT;
2262 	else
2263 		return -EINVAL;
2264 }
2265 
2266 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2267 				     u8 * buf)
2268 {
2269 	if (stringset == ETH_SS_STATS)
2270 		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2271 }
2272 
2273 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2274 					   struct ethtool_stats *estats,
2275 					   u64 * tmp_stats)
2276 {
2277 	struct emac_instance *dev = netdev_priv(ndev);
2278 
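	/* buffer layout must match emac_stats_keys: stats first, then estats */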
2279 	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2280 	tmp_stats += sizeof(dev->stats) / sizeof(u64);
2281 	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2282 }
2283 
2284 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2285 				     struct ethtool_drvinfo *info)
2286 {
2287 	struct emac_instance *dev = netdev_priv(ndev);
2288 
2289 	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2290 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2291 	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
2292 		 dev->cell_index, dev->ofdev->dev.of_node);
2293 }
2294 
2295 static const struct ethtool_ops emac_ethtool_ops = {
2296 	.get_drvinfo = emac_ethtool_get_drvinfo,
2297 
2298 	.get_regs_len = emac_ethtool_get_regs_len,
2299 	.get_regs = emac_ethtool_get_regs,
2300 
2301 	.nway_reset = emac_ethtool_nway_reset,
2302 
2303 	.get_ringparam = emac_ethtool_get_ringparam,
2304 	.get_pauseparam = emac_ethtool_get_pauseparam,
2305 
2306 	.get_strings = emac_ethtool_get_strings,
2307 	.get_sset_count = emac_ethtool_get_sset_count,
2308 	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2309 
2310 	.get_link = ethtool_op_get_link,
2311 	.get_link_ksettings = emac_ethtool_get_link_ksettings,
2312 	.set_link_ksettings = emac_ethtool_set_link_ksettings,
2313 };
2314 
2315 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2316 {
2317 	struct emac_instance *dev = netdev_priv(ndev);
2318 	struct mii_ioctl_data *data = if_mii(rq);
2319 
2320 	DBG(dev, "ioctl %08x" NL, cmd);
2321 
2322 	if (dev->phy.address < 0)
2323 		return -EOPNOTSUPP;
2324 
2325 	switch (cmd) {
2326 	case SIOCGMIIPHY:
2327 		data->phy_id = dev->phy.address;
2328 		/* Fall through */
2329 	case SIOCGMIIREG:
2330 		data->val_out = emac_mdio_read(ndev, dev->phy.address,
2331 					       data->reg_num);
2332 		return 0;
2333 
2334 	case SIOCSMIIREG:
2335 		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2336 				data->val_in);
2337 		return 0;
2338 	default:
2339 		return -EOPNOTSUPP;
2340 	}
2341 }
2342 
2343 struct emac_depentry {
2344 	u32			phandle;
2345 	struct device_node	*node;
2346 	struct platform_device	*ofdev;
2347 	void			*drvdata;
2348 };
2349 
2350 #define	EMAC_DEP_MAL_IDX	0
2351 #define	EMAC_DEP_ZMII_IDX	1
2352 #define	EMAC_DEP_RGMII_IDX	2
2353 #define	EMAC_DEP_TAH_IDX	3
2354 #define	EMAC_DEP_MDIO_IDX	4
2355 #define	EMAC_DEP_PREV_IDX	5
2356 #define	EMAC_DEP_COUNT		6
2357 
2358 static int emac_check_deps(struct emac_instance *dev,
2359 			   struct emac_depentry *deps)
2360 {
2361 	int i, there = 0;
2362 	struct device_node *np;
2363 
2364 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2365 		/* no dependency on that item, all right */
2366 		if (deps[i].phandle == 0) {
2367 			there++;
2368 			continue;
2369 		}
2370 		/* special case for blist as the dependency might go away */
2371 		if (i == EMAC_DEP_PREV_IDX) {
2372 			np = *(dev->blist - 1);
2373 			if (np == NULL) {
2374 				deps[i].phandle = 0;
2375 				there++;
2376 				continue;
2377 			}
2378 			if (deps[i].node == NULL)
2379 				deps[i].node = of_node_get(np);
2380 		}
2381 		if (deps[i].node == NULL)
2382 			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2383 		if (deps[i].node == NULL)
2384 			continue;
2385 		if (deps[i].ofdev == NULL)
2386 			deps[i].ofdev = of_find_device_by_node(deps[i].node);
2387 		if (deps[i].ofdev == NULL)
2388 			continue;
2389 		if (deps[i].drvdata == NULL)
2390 			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2391 		if (deps[i].drvdata != NULL)
2392 			there++;
2393 	}
2394 	return there == EMAC_DEP_COUNT;
2395 }
2396 
2397 static void emac_put_deps(struct emac_instance *dev)
2398 {
2399 	of_dev_put(dev->mal_dev);
2400 	of_dev_put(dev->zmii_dev);
2401 	of_dev_put(dev->rgmii_dev);
2402 	of_dev_put(dev->mdio_dev);
2403 	of_dev_put(dev->tah_dev);
2404 }
2405 
2406 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2407 			      void *data)
2408 {
2409 	/* We are only interested in devices being bound to a driver */
2410 	if (action == BUS_NOTIFY_BOUND_DRIVER)
2411 		wake_up_all(&emac_probe_wait);
2412 	return 0;
2413 }
2414 
2415 static struct notifier_block emac_of_bus_notifier = {
2416 	.notifier_call = emac_of_bus_notify
2417 };
2418 
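/*
 * Collect the phandles of everything this EMAC depends on (MAL, ZMII,
 * RGMII, TAH, MDIO and the previous EMAC in the boot list) and sleep,
 * re-running emac_check_deps() every time a driver binds on the
 * platform bus, until all dependencies have probed or we time out.
 */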
2419 static int emac_wait_deps(struct emac_instance *dev)
2420 {
2421 	struct emac_depentry deps[EMAC_DEP_COUNT];
2422 	int i, err;
2423 
2424 	memset(&deps, 0, sizeof(deps));
2425 
2426 	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2427 	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2428 	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2429 	if (dev->tah_ph)
2430 		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2431 	if (dev->mdio_ph)
2432 		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2433 	if (dev->blist && dev->blist > emac_boot_list)
2434 		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2435 	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2436 	wait_event_timeout(emac_probe_wait,
2437 			   emac_check_deps(dev, deps),
2438 			   EMAC_PROBE_DEP_TIMEOUT);
2439 	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2440 	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2441 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2442 		of_node_put(deps[i].node);
2443 		if (err)
2444 			of_dev_put(deps[i].ofdev);
2445 	}
2446 	if (err == 0) {
2447 		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2448 		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2449 		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2450 		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2451 		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2452 	}
2453 	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2454 	return err;
2455 }
2456 
2457 static int emac_read_uint_prop(struct device_node *np, const char *name,
2458 			       u32 *val, int fatal)
2459 {
2460 	int len;
2461 	const u32 *prop = of_get_property(np, name, &len);
2462 	if (prop == NULL || len < sizeof(u32)) {
2463 		if (fatal)
2464 			printk(KERN_ERR "%pOF: missing %s property\n",
2465 			       np, name);
2466 		return -ENODEV;
2467 	}
2468 	*val = *prop;
2469 	return 0;
2470 }
2471 
2472 static void emac_adjust_link(struct net_device *ndev)
2473 {
2474 	struct emac_instance *dev = netdev_priv(ndev);
2475 	struct phy_device *phy = dev->phy_dev;
2476 
2477 	dev->phy.autoneg = phy->autoneg;
2478 	dev->phy.speed = phy->speed;
2479 	dev->phy.duplex = phy->duplex;
2480 	dev->phy.pause = phy->pause;
2481 	dev->phy.asym_pause = phy->asym_pause;
2482 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
2483 						phy->advertising);
2484 }
2485 
2486 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2487 {
2488 	int ret = emac_mdio_read(bus->priv, addr, regnum);
2489 	/* This is a workaround for powered down ports/phys.
2490 	 * In the wild, this was seen on the Cisco Meraki MX60(W).
2491 	 * This hardware disables ports as part of the handoff
2492 	 * procedure. Accessing the ports will lead to errors
2493 	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2494 	 */
2495 	return ret < 0 ? 0xffff : ret;
2496 }
2497 
2498 static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2499 			      int regnum, u16 val)
2500 {
2501 	emac_mdio_write(bus->priv, addr, regnum, val);
2502 	return 0;
2503 }
2504 
2505 static int emac_mii_bus_reset(struct mii_bus *bus)
2506 {
2507 	struct emac_instance *dev = netdev_priv(bus->priv);
2508 
2509 	return emac_reset(dev);
2510 }
2511 
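/*
 * The emac_mdio_* helpers below bridge the driver's private struct
 * mii_phy state machine to phylib for DT-described PHYs: settings are
 * copied into the attached phy_device and the matching phylib call is
 * made.
 */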
2512 static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2513 				    struct phy_device *phy_dev)
2514 {
2515 	phy_dev->autoneg = phy->autoneg;
2516 	phy_dev->speed = phy->speed;
2517 	phy_dev->duplex = phy->duplex;
2518 	ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
2519 						phy->advertising);
2520 	return phy_start_aneg(phy_dev);
2521 }
2522 
2523 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2524 {
2525 	struct net_device *ndev = phy->dev;
2526 	struct emac_instance *dev = netdev_priv(ndev);
2527 
2528 	phy->autoneg = AUTONEG_ENABLE;
2529 	phy->advertising = advertise;
2530 	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2531 }
2532 
2533 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2534 {
2535 	struct net_device *ndev = phy->dev;
2536 	struct emac_instance *dev = netdev_priv(ndev);
2537 
2538 	phy->autoneg = AUTONEG_DISABLE;
2539 	phy->speed = speed;
2540 	phy->duplex = fd;
2541 	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2542 }
2543 
2544 static int emac_mdio_poll_link(struct mii_phy *phy)
2545 {
2546 	struct net_device *ndev = phy->dev;
2547 	struct emac_instance *dev = netdev_priv(ndev);
2548 	int res;
2549 
2550 	res = phy_read_status(dev->phy_dev);
2551 	if (res) {
2552 		dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
2553 		return ethtool_op_get_link(ndev);
2554 	}
2555 
2556 	return dev->phy_dev->link;
2557 }
2558 
2559 static int emac_mdio_read_link(struct mii_phy *phy)
2560 {
2561 	struct net_device *ndev = phy->dev;
2562 	struct emac_instance *dev = netdev_priv(ndev);
2563 	struct phy_device *phy_dev = dev->phy_dev;
2564 	int res;
2565 
2566 	res = phy_read_status(phy_dev);
2567 	if (res)
2568 		return res;
2569 
2570 	phy->speed = phy_dev->speed;
2571 	phy->duplex = phy_dev->duplex;
2572 	phy->pause = phy_dev->pause;
2573 	phy->asym_pause = phy_dev->asym_pause;
2574 	return 0;
2575 }
2576 
2577 static int emac_mdio_init_phy(struct mii_phy *phy)
2578 {
2579 	struct net_device *ndev = phy->dev;
2580 	struct emac_instance *dev = netdev_priv(ndev);
2581 
2582 	phy_start(dev->phy_dev);
2583 	return phy_init_hw(dev->phy_dev);
2584 }
2585 
2586 static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
2587 	.init		= emac_mdio_init_phy,
2588 	.setup_aneg	= emac_mdio_setup_aneg,
2589 	.setup_forced	= emac_mdio_setup_forced,
2590 	.poll_link	= emac_mdio_poll_link,
2591 	.read_link	= emac_mdio_read_link,
2592 };
2593 
2594 static int emac_dt_mdio_probe(struct emac_instance *dev)
2595 {
2596 	struct device_node *mii_np;
2597 	int res;
2598 
2599 	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
2600 	if (!mii_np) {
2601 		dev_err(&dev->ofdev->dev, "no mdio definition found.");
2602 		return -ENODEV;
2603 	}
2604 
2605 	if (!of_device_is_available(mii_np)) {
2606 		res = -ENODEV;
2607 		goto put_node;
2608 	}
2609 
2610 	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
2611 	if (!dev->mii_bus) {
2612 		res = -ENOMEM;
2613 		goto put_node;
2614 	}
2615 
2616 	dev->mii_bus->priv = dev->ndev;
2617 	dev->mii_bus->parent = dev->ndev->dev.parent;
2618 	dev->mii_bus->name = "emac_mdio";
2619 	dev->mii_bus->read = &emac_mii_bus_read;
2620 	dev->mii_bus->write = &emac_mii_bus_write;
2621 	dev->mii_bus->reset = &emac_mii_bus_reset;
2622 	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
2623 	res = of_mdiobus_register(dev->mii_bus, mii_np);
2624 	if (res) {
2625 		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
2626 			dev->mii_bus->name, res);
2627 	}
2628 
2629  put_node:
2630 	of_node_put(mii_np);
2631 	return res;
2632 }
2633 
2634 static int emac_dt_phy_connect(struct emac_instance *dev,
2635 			       struct device_node *phy_handle)
2636 {
2637 	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2638 				    GFP_KERNEL);
2639 	if (!dev->phy.def)
2640 		return -ENOMEM;
2641 
2642 	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
2643 				      0, dev->phy_mode);
2644 	if (!dev->phy_dev) {
2645 		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
2646 		return -ENODEV;
2647 	}
2648 
2649 	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
2650 	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
2651 	dev->phy.def->name = dev->phy_dev->drv->name;
2652 	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
2653 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
2654 						dev->phy_dev->supported);
2655 	dev->phy.address = dev->phy_dev->mdio.addr;
2656 	dev->phy.mode = dev->phy_dev->interface;
2657 	return 0;
2658 }
2659 
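/*
 * Returns 1 if the node has no "phy-handle" property (the caller then
 * falls back to the legacy PHY scan), 0 on success, or a negative error
 * code.
 */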
2660 static int emac_dt_phy_probe(struct emac_instance *dev)
2661 {
2662 	struct device_node *np = dev->ofdev->dev.of_node;
2663 	struct device_node *phy_handle;
2664 	int res = 1;
2665 
2666 	phy_handle = of_parse_phandle(np, "phy-handle", 0);
2667 
2668 	if (phy_handle) {
2669 		res = emac_dt_mdio_probe(dev);
2670 		if (!res) {
2671 			res = emac_dt_phy_connect(dev, phy_handle);
2672 			if (res)
2673 				mdiobus_unregister(dev->mii_bus);
2674 		}
2675 	}
2676 
2677 	of_node_put(phy_handle);
2678 	return res;
2679 }
2680 
2681 static int emac_init_phy(struct emac_instance *dev)
2682 {
2683 	struct device_node *np = dev->ofdev->dev.of_node;
2684 	struct net_device *ndev = dev->ndev;
2685 	u32 phy_map, adv;
2686 	int i;
2687 
2688 	dev->phy.dev = ndev;
2689 	dev->phy.mode = dev->phy_mode;
2690 
2691 	/* PHY-less configuration. */
2692 	if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
2693 	    of_phy_is_fixed_link(np)) {
2694 		emac_reset(dev);
2695 
2696 		/* PHY-less configuration. */
2697 		dev->phy.address = -1;
2698 		dev->phy.features = SUPPORTED_MII;
2699 		if (emac_phy_supports_gige(dev->phy_mode))
2700 			dev->phy.features |= SUPPORTED_1000baseT_Full;
2701 		else
2702 			dev->phy.features |= SUPPORTED_100baseT_Full;
2703 		dev->phy.pause = 1;
2704 
2705 		if (of_phy_is_fixed_link(np)) {
2706 			int res = emac_dt_mdio_probe(dev);
2707 
2708 			if (res)
2709 				return res;
2710 
2711 			res = of_phy_register_fixed_link(np);
2712 			dev->phy_dev = of_phy_find_device(np);
2713 			if (res || !dev->phy_dev) {
2714 				mdiobus_unregister(dev->mii_bus);
2715 				return res ? res : -EINVAL;
2716 			}
2717 			emac_adjust_link(dev->ndev);
2718 			put_device(&dev->phy_dev->mdio.dev);
2719 		}
2720 		return 0;
2721 	}
2722 
2723 	mutex_lock(&emac_phy_map_lock);
2724 	phy_map = dev->phy_map | busy_phy_map;
2725 
2726 	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2727 
2728 	dev->phy.mdio_read = emac_mdio_read;
2729 	dev->phy.mdio_write = emac_mdio_write;
2730 
2731 	/* Enable internal clock source */
2732 #ifdef CONFIG_PPC_DCR_NATIVE
2733 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2734 		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2735 #endif
2736 	/* PHY clock workaround */
2737 	emac_rx_clk_tx(dev);
2738 
2739 	/* Enable internal clock source on 440GX */
2740 #ifdef CONFIG_PPC_DCR_NATIVE
2741 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2742 		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2743 #endif
2744 	/* Configure EMAC with defaults so we can at least use MDIO.
2745 	 * This is needed mostly for 440GX.
2746 	 */
2747 	if (emac_phy_gpcs(dev->phy.mode)) {
2748 		/* XXX
2749 		 * Make GPCS PHY address equal to EMAC index.
2750 		 * We probably should take into account busy_phy_map
2751 		 * and/or phy_map here.
2752 		 *
2753 		 * Note that the busy_phy_map is currently global
2754 		 * while it should probably be per-ASIC...
2755 		 */
2756 		dev->phy.gpcs_address = dev->gpcs_address;
2757 		if (dev->phy.gpcs_address == 0xffffffff)
2758 			dev->phy.address = dev->cell_index;
2759 	}
2760 
2761 	emac_configure(dev);
2762 
2763 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2764 		int res = emac_dt_phy_probe(dev);
2765 
2766 		switch (res) {
2767 		case 1:
2768 			/* No phy-handle property configured.
2769 			 * Continue with the existing phy probe
2770 			 * and setup code.
2771 			 */
2772 			break;
2773 
2774 		case 0:
2775 			mutex_unlock(&emac_phy_map_lock);
2776 			goto init_phy;
2777 
2778 		default:
2779 			mutex_unlock(&emac_phy_map_lock);
2780 			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
2781 				res);
2782 			return res;
2783 		}
2784 	}
2785 
2786 	if (dev->phy_address != 0xffffffff)
2787 		phy_map = ~(1 << dev->phy_address);
2788 
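	/*
	 * Scan all 32 MDIO addresses, skipping those masked off in
	 * phy_map/busy_phy_map, and claim the first one that answers
	 * and probes as a supported PHY.
	 */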
2789 	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2790 		if (!(phy_map & 1)) {
2791 			int r;
2792 			busy_phy_map |= 1 << i;
2793 
2794 			/* Quick check if there is a PHY at the address */
2795 			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2796 			if (r == 0xffff || r < 0)
2797 				continue;
2798 			if (!emac_mii_phy_probe(&dev->phy, i))
2799 				break;
2800 		}
2801 
2802 	/* Enable external clock source */
2803 #ifdef CONFIG_PPC_DCR_NATIVE
2804 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2805 		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2806 #endif
2807 	mutex_unlock(&emac_phy_map_lock);
2808 	if (i == 0x20) {
2809 		printk(KERN_WARNING "%pOF: can't find PHY!\n", np);
2810 		return -ENXIO;
2811 	}
2812 
2813  init_phy:
2814 	/* Init PHY */
2815 	if (dev->phy.def->ops->init)
2816 		dev->phy.def->ops->init(&dev->phy);
2817 
2818 	/* Disable any PHY features not supported by the platform */
2819 	dev->phy.def->features &= ~dev->phy_feat_exc;
2820 	dev->phy.features &= ~dev->phy_feat_exc;
2821 
2822 	/* Setup initial link parameters */
2823 	if (dev->phy.features & SUPPORTED_Autoneg) {
2824 		adv = dev->phy.features;
2825 		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2826 			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2827 		/* Restart autonegotiation */
2828 		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2829 	} else {
2830 		u32 f = dev->phy.def->features;
2831 		int speed = SPEED_10, fd = DUPLEX_HALF;
2832 
2833 		/* Select highest supported speed/duplex */
2834 		if (f & SUPPORTED_1000baseT_Full) {
2835 			speed = SPEED_1000;
2836 			fd = DUPLEX_FULL;
2837 		} else if (f & SUPPORTED_1000baseT_Half)
2838 			speed = SPEED_1000;
2839 		else if (f & SUPPORTED_100baseT_Full) {
2840 			speed = SPEED_100;
2841 			fd = DUPLEX_FULL;
2842 		} else if (f & SUPPORTED_100baseT_Half)
2843 			speed = SPEED_100;
2844 		else if (f & SUPPORTED_10baseT_Full)
2845 			fd = DUPLEX_FULL;
2846 
2847 		/* Force link parameters */
2848 		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2849 	}
2850 	return 0;
2851 }
2852 
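/*
 * Parse the EMAC configuration out of the device tree.  A minimal,
 * hypothetical node carrying just the properties that are mandatory
 * below might look like:
 *
 *	EMAC0: ethernet@ef600e00 {
 *		device_type = "network";
 *		compatible = "ibm,emac4sync";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		phy-mode = "rgmii";
 *		local-mac-address = [00 00 00 00 00 00];
 *	};
 *
 * (names and addresses above are illustrative only).  Everything else
 * read here is optional, with the defaults assigned below; note that
 * "clock-frequency" is taken from the parent (OPB) node.
 */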
2853 static int emac_init_config(struct emac_instance *dev)
2854 {
2855 	struct device_node *np = dev->ofdev->dev.of_node;
2856 	const void *p;
2857 
2858 	/* Read config from device-tree */
2859 	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2860 		return -ENXIO;
2861 	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2862 		return -ENXIO;
2863 	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2864 		return -ENXIO;
2865 	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2866 		return -ENXIO;
2867 	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2868 		dev->max_mtu = ETH_DATA_LEN;
2869 	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2870 		dev->rx_fifo_size = 2048;
2871 	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2872 		dev->tx_fifo_size = 2048;
2873 	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2874 		dev->rx_fifo_size_gige = dev->rx_fifo_size;
2875 	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2876 		dev->tx_fifo_size_gige = dev->tx_fifo_size;
2877 	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2878 		dev->phy_address = 0xffffffff;
2879 	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2880 		dev->phy_map = 0xffffffff;
2881 	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2882 		dev->gpcs_address = 0xffffffff;
2883 	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2884 		return -ENXIO;
2885 	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2886 		dev->tah_ph = 0;
2887 	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2888 		dev->tah_port = 0;
2889 	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2890 		dev->mdio_ph = 0;
2891 	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2892 		dev->zmii_ph = 0;
2893 	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2894 		dev->zmii_port = 0xffffffff;
2895 	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2896 		dev->rgmii_ph = 0;
2897 	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2898 		dev->rgmii_port = 0xffffffff;
2899 	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2900 		dev->fifo_entry_size = 16;
2901 	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2902 		dev->mal_burst_size = 256;
2903 
2904 	/* PHY mode needs some decoding */
2905 	dev->phy_mode = of_get_phy_mode(np);
2906 	if (dev->phy_mode < 0)
2907 		dev->phy_mode = PHY_INTERFACE_MODE_NA;
2908 
2909 	/* Check EMAC version */
2910 	if (of_device_is_compatible(np, "ibm,emac4sync")) {
2911 		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2912 		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2913 		    of_device_is_compatible(np, "ibm,emac-460gt"))
2914 			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2915 		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2916 		    of_device_is_compatible(np, "ibm,emac-405exr"))
2917 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2918 		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2919 			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2920 					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2921 					  EMAC_FTR_460EX_PHY_CLK_FIX);
2922 		}
2923 	} else if (of_device_is_compatible(np, "ibm,emac4")) {
2924 		dev->features |= EMAC_FTR_EMAC4;
2925 		if (of_device_is_compatible(np, "ibm,emac-440gx"))
2926 			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2927 	} else {
2928 		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2929 		    of_device_is_compatible(np, "ibm,emac-440gr"))
2930 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2931 		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2932 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2933 			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2934 #else
2935 			printk(KERN_ERR "%pOF: Flow control not disabled!\n",
2936 					np);
2937 			return -ENXIO;
2938 #endif
2939 		}
2940 
2941 	}
2942 
2943 	/* Fixup some feature bits based on the device tree */
2944 	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2945 		dev->features |= EMAC_FTR_STACR_OC_INVERT;
2946 	if (of_get_property(np, "has-new-stacr-staopc", NULL))
2947 		dev->features |= EMAC_FTR_HAS_NEW_STACR;
2948 
2949 	/* CAB lacks the appropriate properties */
2950 	if (of_device_is_compatible(np, "ibm,emac-axon"))
2951 		dev->features |= EMAC_FTR_HAS_NEW_STACR |
2952 			EMAC_FTR_STACR_OC_INVERT;
2953 
2954 	/* Enable TAH/ZMII/RGMII features as found */
2955 	if (dev->tah_ph != 0) {
2956 #ifdef CONFIG_IBM_EMAC_TAH
2957 		dev->features |= EMAC_FTR_HAS_TAH;
2958 #else
2959 		printk(KERN_ERR "%pOF: TAH support not enabled !\n", np);
2960 		return -ENXIO;
2961 #endif
2962 	}
2963 
2964 	if (dev->zmii_ph != 0) {
2965 #ifdef CONFIG_IBM_EMAC_ZMII
2966 		dev->features |= EMAC_FTR_HAS_ZMII;
2967 #else
2968 		printk(KERN_ERR "%pOF: ZMII support not enabled !\n", np);
2969 		return -ENXIO;
2970 #endif
2971 	}
2972 
2973 	if (dev->rgmii_ph != 0) {
2974 #ifdef CONFIG_IBM_EMAC_RGMII
2975 		dev->features |= EMAC_FTR_HAS_RGMII;
2976 #else
2977 		printk(KERN_ERR "%pOF: RGMII support not enabled !\n", np);
2978 		return -ENXIO;
2979 #endif
2980 	}
2981 
2982 	/* Read MAC-address */
2983 	p = of_get_property(np, "local-mac-address", NULL);
2984 	if (p == NULL) {
2985 		printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
2986 		       np);
2987 		return -ENXIO;
2988 	}
2989 	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
2990 
2991 	/* IAHT and GAHT filter parameterization */
2992 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2993 		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2994 		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2995 	} else {
2996 		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2997 		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2998 	}
2999 
3000 	/* This should never happen */
3001 	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
3002 		return -ENXIO;
3003 
3004 	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
3005 	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
3006 	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
3007 	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
3008 	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
3009 
3010 	return 0;
3011 }
3012 
3013 static const struct net_device_ops emac_netdev_ops = {
3014 	.ndo_open		= emac_open,
3015 	.ndo_stop		= emac_close,
3016 	.ndo_get_stats		= emac_stats,
3017 	.ndo_set_rx_mode	= emac_set_multicast_list,
3018 	.ndo_do_ioctl		= emac_ioctl,
3019 	.ndo_tx_timeout		= emac_tx_timeout,
3020 	.ndo_validate_addr	= eth_validate_addr,
3021 	.ndo_set_mac_address	= emac_set_mac_address,
3022 	.ndo_start_xmit		= emac_start_xmit,
3023 };
3024 
3025 static const struct net_device_ops emac_gige_netdev_ops = {
3026 	.ndo_open		= emac_open,
3027 	.ndo_stop		= emac_close,
3028 	.ndo_get_stats		= emac_stats,
3029 	.ndo_set_rx_mode	= emac_set_multicast_list,
3030 	.ndo_do_ioctl		= emac_ioctl,
3031 	.ndo_tx_timeout		= emac_tx_timeout,
3032 	.ndo_validate_addr	= eth_validate_addr,
3033 	.ndo_set_mac_address	= emac_set_mac_address,
3034 	.ndo_start_xmit		= emac_start_xmit_sg,
3035 	.ndo_change_mtu		= emac_change_mtu,
3036 };
3037 
3038 static int emac_probe(struct platform_device *ofdev)
3039 {
3040 	struct net_device *ndev;
3041 	struct emac_instance *dev;
3042 	struct device_node *np = ofdev->dev.of_node;
3043 	struct device_node **blist = NULL;
3044 	int err, i;
3045 
3046 	/* Skip unused/unwired EMACs.  We leave the check for an unused
3047 	 * property here for now, but new flat device trees should set a
3048 	 * status property to "disabled" instead.
3049 	 */
3050 	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3051 		return -ENODEV;
3052 
3053 	/* Find ourselves in the bootlist if we are there */
3054 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3055 		if (emac_boot_list[i] == np)
3056 			blist = &emac_boot_list[i];
3057 
3058 	/* Allocate our net_device structure */
3059 	err = -ENOMEM;
3060 	ndev = alloc_etherdev(sizeof(struct emac_instance));
3061 	if (!ndev)
3062 		goto err_gone;
3063 
3064 	dev = netdev_priv(ndev);
3065 	dev->ndev = ndev;
3066 	dev->ofdev = ofdev;
3067 	dev->blist = blist;
3068 	SET_NETDEV_DEV(ndev, &ofdev->dev);
3069 
3070 	/* Initialize some embedded data structures */
3071 	mutex_init(&dev->mdio_lock);
3072 	mutex_init(&dev->link_lock);
3073 	spin_lock_init(&dev->lock);
3074 	INIT_WORK(&dev->reset_work, emac_reset_work);
3075 
3076 	/* Init various config data based on device-tree */
3077 	err = emac_init_config(dev);
3078 	if (err)
3079 		goto err_free;
3080 
3081 	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
3082 	dev->emac_irq = irq_of_parse_and_map(np, 0);
3083 	dev->wol_irq = irq_of_parse_and_map(np, 1);
3084 	if (!dev->emac_irq) {
3085 		printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
3086 		err = -ENODEV;
3087 		goto err_free;
3088 	}
3089 	ndev->irq = dev->emac_irq;
3090 
3091 	/* Map EMAC regs */
3092 	// TODO: use platform_get_resource() and devm_ioremap_resource()
3093 	dev->emacp = of_iomap(np, 0);
3094 	if (dev->emacp == NULL) {
3095 		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
3096 		err = -ENOMEM;
3097 		goto err_irq_unmap;
3098 	}
3099 
3100 	/* Wait for dependent devices */
3101 	err = emac_wait_deps(dev);
3102 	if (err) {
3103 		printk(KERN_ERR
3104 		       "%pOF: Timeout waiting for dependent devices\n", np);
3105 		/* display more info about what's missing? */
3106 		goto err_reg_unmap;
3107 	}
3108 	dev->mal = platform_get_drvdata(dev->mal_dev);
3109 	if (dev->mdio_dev != NULL)
3110 		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
3111 
3112 	/* Register with MAL */
3113 	dev->commac.ops = &emac_commac_ops;
3114 	dev->commac.dev = dev;
3115 	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3116 	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3117 	err = mal_register_commac(dev->mal, &dev->commac);
3118 	if (err) {
3119 		printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n",
3120 		       np, dev->mal_dev->dev.of_node);
3121 		goto err_rel_deps;
3122 	}
3123 	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3124 	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3125 
3126 	/* Get pointers to BD rings */
3127 	dev->tx_desc =
3128 	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3129 	dev->rx_desc =
3130 	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3131 
3132 	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3133 	DBG(dev, "rx_desc %p" NL, dev->rx_desc);
3134 
3135 	/* Clean rings */
3136 	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3137 	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
3138 	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3139 	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
3140 
3141 	/* Attach to ZMII, if needed */
3142 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3143 	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3144 		goto err_unreg_commac;
3145 
3146 	/* Attach to RGMII, if needed */
3147 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3148 	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3149 		goto err_detach_zmii;
3150 
3151 	/* Attach to TAH, if needed */
3152 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3153 	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3154 		goto err_detach_rgmii;
3155 
3156 	/* Set some link defaults before we can find out real parameters */
3157 	dev->phy.speed = SPEED_100;
3158 	dev->phy.duplex = DUPLEX_FULL;
3159 	dev->phy.autoneg = AUTONEG_DISABLE;
3160 	dev->phy.pause = dev->phy.asym_pause = 0;
3161 	dev->stop_timeout = STOP_TIMEOUT_100;
3162 	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3163 
3164 	/* Some SoCs, like APM821xx, do not support half-duplex mode. */
3165 	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3166 		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3167 				     SUPPORTED_100baseT_Half |
3168 				     SUPPORTED_10baseT_Half);
3169 	}
3170 
3171 	/* Find PHY if any */
3172 	err = emac_init_phy(dev);
3173 	if (err != 0)
3174 		goto err_detach_tah;
3175 
3176 	if (dev->tah_dev) {
3177 		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3178 		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3179 	}
3180 	ndev->watchdog_timeo = 5 * HZ;
3181 	if (emac_phy_supports_gige(dev->phy_mode)) {
3182 		ndev->netdev_ops = &emac_gige_netdev_ops;
3183 		dev->commac.ops = &emac_commac_sg_ops;
3184 	} else
3185 		ndev->netdev_ops = &emac_netdev_ops;
3186 	ndev->ethtool_ops = &emac_ethtool_ops;
3187 
3188 	/* MTU range: 46 - 1500 or whatever is in OF */
3189 	ndev->min_mtu = EMAC_MIN_MTU;
3190 	ndev->max_mtu = dev->max_mtu;
3191 
3192 	netif_carrier_off(ndev);
3193 
3194 	err = register_netdev(ndev);
3195 	if (err) {
3196 		printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
3197 		       np, err);
3198 		goto err_detach_tah;
3199 	}
3200 
3201 	/* Set our drvdata last as we don't want it visible until we are
3202 	 * fully initialized.
3203 	 */
3204 	wmb();
3205 	platform_set_drvdata(ofdev, dev);
3206 
3207 	/* There's a new kid in town! Let's tell everybody */
3208 	wake_up_all(&emac_probe_wait);
3209 
3210 
3211 	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
3212 	       ndev->name, dev->cell_index, np, ndev->dev_addr);
3213 
3214 	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
3215 		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3216 
3217 	if (dev->phy.address >= 0)
3218 		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
3219 		       dev->phy.def->name, dev->phy.address);
3220 
3221 	/* Life is good */
3222 	return 0;
3223 
3224 	/* I have a bad feeling about this ... */
3225 
3226  err_detach_tah:
3227 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3228 		tah_detach(dev->tah_dev, dev->tah_port);
3229  err_detach_rgmii:
3230 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3231 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3232  err_detach_zmii:
3233 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3234 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3235  err_unreg_commac:
3236 	mal_unregister_commac(dev->mal, &dev->commac);
3237  err_rel_deps:
3238 	emac_put_deps(dev);
3239  err_reg_unmap:
3240 	iounmap(dev->emacp);
3241  err_irq_unmap:
3242 	if (dev->wol_irq)
3243 		irq_dispose_mapping(dev->wol_irq);
3244 	if (dev->emac_irq)
3245 		irq_dispose_mapping(dev->emac_irq);
3246  err_free:
3247 	free_netdev(ndev);
3248  err_gone:
3249 	/* If we were on the bootlist, remove ourselves as we won't show
3250 	 * up, and wake up all waiters to notify them in case they were
3251 	 * waiting on us.
3252 	 */
3253 	if (blist) {
3254 		*blist = NULL;
3255 		wake_up_all(&emac_probe_wait);
3256 	}
3257 	return err;
3258 }
3259 
3260 static int emac_remove(struct platform_device *ofdev)
3261 {
3262 	struct emac_instance *dev = platform_get_drvdata(ofdev);
3263 
3264 	DBG(dev, "remove" NL);
3265 
3266 	unregister_netdev(dev->ndev);
3267 
3268 	cancel_work_sync(&dev->reset_work);
3269 
3270 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3271 		tah_detach(dev->tah_dev, dev->tah_port);
3272 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3273 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3274 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3275 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3276 
3277 	if (dev->phy_dev)
3278 		phy_disconnect(dev->phy_dev);
3279 
3280 	if (dev->mii_bus)
3281 		mdiobus_unregister(dev->mii_bus);
3282 
3283 	busy_phy_map &= ~(1 << dev->phy.address);
3284 	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3285 
3286 	mal_unregister_commac(dev->mal, &dev->commac);
3287 	emac_put_deps(dev);
3288 
3289 	iounmap(dev->emacp);
3290 
3291 	if (dev->wol_irq)
3292 		irq_dispose_mapping(dev->wol_irq);
3293 	if (dev->emac_irq)
3294 		irq_dispose_mapping(dev->emac_irq);
3295 
3296 	free_netdev(dev->ndev);
3297 
3298 	return 0;
3299 }
3300 
3301 /* XXX Features in here should be replaced by properties... */
3302 static const struct of_device_id emac_match[] =
3303 {
3304 	{
3305 		.type		= "network",
3306 		.compatible	= "ibm,emac",
3307 	},
3308 	{
3309 		.type		= "network",
3310 		.compatible	= "ibm,emac4",
3311 	},
3312 	{
3313 		.type		= "network",
3314 		.compatible	= "ibm,emac4sync",
3315 	},
3316 	{},
3317 };
3318 MODULE_DEVICE_TABLE(of, emac_match);
3319 
3320 static struct platform_driver emac_driver = {
3321 	.driver = {
3322 		.name = "emac",
3323 		.of_match_table = emac_match,
3324 	},
3325 	.probe = emac_probe,
3326 	.remove = emac_remove,
3327 };
3328 
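/*
 * Build a list of all EMAC nodes sorted by cell-index.  Probe order
 * matters: a later EMAC can be made to wait on its predecessor in this
 * list (see EMAC_DEP_PREV_IDX).
 */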
3329 static void __init emac_make_bootlist(void)
3330 {
3331 	struct device_node *np = NULL;
3332 	int j, max, i = 0;
3333 	int cell_indices[EMAC_BOOT_LIST_SIZE];
3334 
3335 	/* Collect EMACs */
3336 	while((np = of_find_all_nodes(np)) != NULL) {
3337 		const u32 *idx;
3338 
3339 		if (of_match_node(emac_match, np) == NULL)
3340 			continue;
3341 		if (of_get_property(np, "unused", NULL))
3342 			continue;
3343 		idx = of_get_property(np, "cell-index", NULL);
3344 		if (idx == NULL)
3345 			continue;
3346 		cell_indices[i] = *idx;
3347 		emac_boot_list[i++] = of_node_get(np);
3348 		if (i >= EMAC_BOOT_LIST_SIZE) {
3349 			of_node_put(np);
3350 			break;
3351 		}
3352 	}
3353 	max = i;
3354 
3355 	/* Bubble sort them (doh, what a creative algorithm :-) */
3356 	for (i = 0; max > 1 && (i < (max - 1)); i++)
3357 		for (j = i; j < max; j++) {
3358 			if (cell_indices[i] > cell_indices[j]) {
3359 				swap(emac_boot_list[i], emac_boot_list[j]);
3360 				swap(cell_indices[i], cell_indices[j]);
3361 			}
3362 		}
3363 }
3364 
3365 static int __init emac_init(void)
3366 {
3367 	int rc;
3368 
3369 	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3370 
3371 	/* Build EMAC boot list */
3372 	emac_make_bootlist();
3373 
3374 	/* Init submodules */
3375 	rc = mal_init();
3376 	if (rc)
3377 		goto err;
3378 	rc = zmii_init();
3379 	if (rc)
3380 		goto err_mal;
3381 	rc = rgmii_init();
3382 	if (rc)
3383 		goto err_zmii;
3384 	rc = tah_init();
3385 	if (rc)
3386 		goto err_rgmii;
3387 	rc = platform_driver_register(&emac_driver);
3388 	if (rc)
3389 		goto err_tah;
3390 
3391 	return 0;
3392 
3393  err_tah:
3394 	tah_exit();
3395  err_rgmii:
3396 	rgmii_exit();
3397  err_zmii:
3398 	zmii_exit();
3399  err_mal:
3400 	mal_exit();
3401  err:
3402 	return rc;
3403 }
3404 
3405 static void __exit emac_exit(void)
3406 {
3407 	int i;
3408 
3409 	platform_driver_unregister(&emac_driver);
3410 
3411 	tah_exit();
3412 	rgmii_exit();
3413 	zmii_exit();
3414 	mal_exit();
3415 
3416 	/* Destroy EMAC boot list */
3417 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3418 		of_node_put(emac_boot_list[i]);
3419 }
3420 
3421 module_init(emac_init);
3422 module_exit(emac_exit);
3423