xref: /openbmc/linux/drivers/net/ethernet/ibm/emac/core.c (revision d0e22329)
1 /*
2  * drivers/net/ethernet/ibm/emac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  * 	Matt Porter <mporter@kernel.crashing.org>
16  *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  * 	Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26 
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/of_mdio.h>
46 #include <linux/slab.h>
47 
48 #include <asm/processor.h>
49 #include <asm/io.h>
50 #include <asm/dma.h>
51 #include <linux/uaccess.h>
52 #include <asm/dcr.h>
53 #include <asm/dcr-regs.h>
54 
55 #include "core.h"
56 
57 /*
58  * Lack of dma_unmap_???? calls is intentional.
59  *
60  * API-correct usage requires additional support state information to be
61  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
62  * EMAC design (e.g. TX buffer passed from network stack can be split into
63  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
64  * maintaining such information will add additional overhead.
65  * Current DMA API implementation for 4xx processors only ensures cache coherency
66  * and dma_unmap_???? routines are empty and are likely to stay this way.
67  * I decided to omit dma_unmap_??? calls because I don't want to add additional
68  * complexity just for the sake of following some abstract API, when it doesn't
69  * add any real benefit to the driver. I understand that this decision maybe
70  * controversial, but I really tried to make code API-correct and efficient
71  * at the same time and didn't come up with code I liked :(.                --ebs
72  */
73 
74 #define DRV_NAME        "emac"
75 #define DRV_VERSION     "3.54"
76 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
77 
78 MODULE_DESCRIPTION(DRV_DESC);
79 MODULE_AUTHOR
80     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
81 MODULE_LICENSE("GPL");
82 
83 /* minimum number of free TX descriptors required to wake up TX process */
84 #define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
85 
86 /* If packet size is less than this number, we allocate small skb and copy packet
87  * contents into it instead of just sending original big skb up
88  */
89 #define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
90 
91 /* Since multiple EMACs share MDIO lines in various ways, we need
92  * to avoid re-using the same PHY ID in cases where the arch didn't
93  * setup precise phy_map entries
94  *
95  * XXX This is something that needs to be reworked as we can have multiple
96  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
97  * probably require in that case to have explicit PHY IDs in the device-tree
98  */
/* Bitmap of PHY addresses already claimed by an EMAC instance;
 * protected by emac_phy_map_lock.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? (in jiffies) */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
124 
125 /* I don't want to litter system log with timeout errors
126  * when we have brain-damaged PHY.
127  */
128 static inline void emac_report_timeout_error(struct emac_instance *dev,
129 					     const char *error)
130 {
131 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
132 				  EMAC_FTR_460EX_PHY_CLK_FIX |
133 				  EMAC_FTR_440EP_PHY_CLK_FIX))
134 		DBG(dev, "%s" NL, error);
135 	else if (net_ratelimit())
136 		printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error);
137 }
138 
139 /* EMAC PHY clock workaround:
140  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141  * which allows controlling each EMAC clock
142  */
/* 440EP PHY clock workaround: set this EMAC's SDR0_MFR[ECS] bit
 * (presumably selecting the alternate RX clock source -- confirm against
 * the 440EP/440GR manual).  No-op unless built with native DCR access
 * and the 440EP fix feature is enabled.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151 
/* Undo emac_rx_clk_tx(): clear this EMAC's SDR0_MFR[ECS] bit, restoring
 * the default clock selection.  No-op unless built with native DCR
 * access and the 440EP fix feature is enabled.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160 
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON	HZ
163 #define PHY_POLL_LINK_OFF	(HZ / 5)
164 
165 /* Graceful stop timeouts in us.
166  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167  */
168 #define STOP_TIMEOUT_10		1230
169 #define STOP_TIMEOUT_100	124
170 #define STOP_TIMEOUT_1000	13
171 #define STOP_TIMEOUT_1000_JUMBO	73
172 
/* 01:80:C2:00:00:01 -- the reserved IEEE 802.3 MAC Control (PAUSE)
 * group address; presumably added to the RX filter so flow-control
 * frames are accepted.  TODO(review): confirm at the use site.
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
176 
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
/* NOTE(review): "tx_bd_multple_collisions" is misspelled, but these
 * strings are exposed through the ethtool stats interface and scripts
 * may match on them, so the typo is left as-is intentionally.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
195 
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199 
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202 	return  phy_interface_mode_is_rgmii(phy_mode) ||
203 		phy_mode == PHY_INTERFACE_MODE_GMII ||
204 		phy_mode == PHY_INTERFACE_MODE_SGMII ||
205 		phy_mode == PHY_INTERFACE_MODE_TBI ||
206 		phy_mode == PHY_INTERFACE_MODE_RTBI;
207 }
208 
209 static inline int emac_phy_gpcs(int phy_mode)
210 {
211 	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
212 		phy_mode == PHY_INTERFACE_MODE_TBI ||
213 		phy_mode == PHY_INTERFACE_MODE_RTBI;
214 }
215 
216 static inline void emac_tx_enable(struct emac_instance *dev)
217 {
218 	struct emac_regs __iomem *p = dev->emacp;
219 	u32 r;
220 
221 	DBG(dev, "tx_enable" NL);
222 
223 	r = in_be32(&p->mr0);
224 	if (!(r & EMAC_MR0_TXE))
225 		out_be32(&p->mr0, r | EMAC_MR0_TXE);
226 }
227 
/* Stop the transmitter: clear MR0[TXE], then busy-wait up to
 * dev->stop_timeout microseconds for MR0[TXI] to assert (TX idle,
 * judging by the bit name -- confirm against the EMAC manual).
 * Reports a (possibly rate-limited) error message on timeout.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;	/* microseconds (1us per poll) */
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
247 
248 static void emac_rx_enable(struct emac_instance *dev)
249 {
250 	struct emac_regs __iomem *p = dev->emacp;
251 	u32 r;
252 
253 	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
254 		goto out;
255 
256 	DBG(dev, "rx_enable" NL);
257 
258 	r = in_be32(&p->mr0);
259 	if (!(r & EMAC_MR0_RXE)) {
260 		if (unlikely(!(r & EMAC_MR0_RXI))) {
261 			/* Wait if previous async disable is still in progress */
262 			int n = dev->stop_timeout;
263 			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
264 				udelay(1);
265 				--n;
266 			}
267 			if (unlikely(!n))
268 				emac_report_timeout_error(dev,
269 							  "RX disable timeout");
270 		}
271 		out_be32(&p->mr0, r | EMAC_MR0_RXE);
272 	}
273  out:
274 	;
275 }
276 
/* Stop the receiver synchronously: clear MR0[RXE], then busy-wait up to
 * dev->stop_timeout microseconds for MR0[RXI] to assert.  Reports a
 * (possibly rate-limited) error message on timeout.  See
 * emac_rx_disable_async() for the fire-and-forget variant.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;	/* microseconds (1us per poll) */
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
296 
/* Quiesce the network interface: block multicast filter updates, stop
 * NAPI polling on the MAL channel and disable the TX queue.  Reversed
 * by emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;		/* defer multicast updates (see emac_set_multicast_list) */
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
308 
/* Undo emac_netif_stop(): re-allow multicast updates (applying any that
 * were deferred while stopped), wake the TX queue and re-enable NAPI
 * polling on the MAL channel.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	/* Apply a multicast list change that arrived while we were stopped */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
328 
329 static inline void emac_rx_disable_async(struct emac_instance *dev)
330 {
331 	struct emac_regs __iomem *p = dev->emacp;
332 	u32 r;
333 
334 	DBG(dev, "rx_disable_async" NL);
335 
336 	r = in_be32(&p->mr0);
337 	if (r & EMAC_MR0_RXE)
338 		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
339 }
340 
/* Soft-reset the EMAC core via MR0[SRST].
 *
 * Returns 0 on success and clears dev->reset_failed; returns -ETIMEDOUT
 * and sets dev->reset_failed when SRST never self-clears.  A previously
 * failed reset makes us skip the RX/TX disable step, which would itself
 * just time out on a wedged core.  On 460EX-class parts a failed first
 * attempt is retried with the internal clock selected (see the long
 * comment below).
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
	bool __maybe_unused try_internal_clock = false;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 *
	 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
	 * ethernet cable is not attached. This causes the reset to timeout
	 * and the PHY detection code in emac_init_phy() is unable to
	 * communicate and detect the AR8035-A PHY. As a result, the emac
	 * driver bails out early and the user has no ethernet.
	 * In order to stay compatible with existing configurations, the
	 * driver will temporarily switch to the internal clock, after
	 * the first reset fails.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	/* Trigger the soft reset, then poll (up to 20 reads, no delay)
	 * for SRST to self-clear.
	 */
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (!n && !try_internal_clock) {
			/* first attempt has timed out. */
			n = 20;
			try_internal_clock = true;
			goto do_retry;
		}

		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
421 
422 static void emac_hash_mc(struct emac_instance *dev)
423 {
424 	const int regs = EMAC_XAHT_REGS(dev);
425 	u32 *gaht_base = emac_gaht_base(dev);
426 	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
427 	struct netdev_hw_addr *ha;
428 	int i;
429 
430 	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
431 
432 	memset(gaht_temp, 0, sizeof (gaht_temp));
433 
434 	netdev_for_each_mc_addr(ha, dev->ndev) {
435 		int slot, reg, mask;
436 		DBG2(dev, "mc %pM" NL, ha->addr);
437 
438 		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
439 					     ether_crc(ETH_ALEN, ha->addr));
440 		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
441 		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
442 
443 		gaht_temp[reg] |= mask;
444 	}
445 
446 	for (i = 0; i < regs; i++)
447 		out_be32(gaht_base + i, gaht_temp[i]);
448 }
449 
450 static inline u32 emac_iff2rmr(struct net_device *ndev)
451 {
452 	struct emac_instance *dev = netdev_priv(ndev);
453 	u32 r;
454 
455 	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
456 
457 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
458 	    r |= EMAC4_RMR_BASE;
459 	else
460 	    r |= EMAC_RMR_BASE;
461 
462 	if (ndev->flags & IFF_PROMISC)
463 		r |= EMAC_RMR_PME;
464 	else if (ndev->flags & IFF_ALLMULTI ||
465 			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
466 		r |= EMAC_RMR_PMME;
467 	else if (!netdev_mc_empty(ndev))
468 		r |= EMAC_RMR_MAE;
469 
470 	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
471 		r &= ~EMAC4_RMR_MJS_MASK;
472 		r |= EMAC4_RMR_MJS(ndev->mtu);
473 	}
474 
475 	return r;
476 }
477 
478 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
479 {
480 	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
481 
482 	DBG2(dev, "__emac_calc_base_mr1" NL);
483 
484 	switch(tx_size) {
485 	case 2048:
486 		ret |= EMAC_MR1_TFS_2K;
487 		break;
488 	default:
489 		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
490 		       dev->ndev->name, tx_size);
491 	}
492 
493 	switch(rx_size) {
494 	case 16384:
495 		ret |= EMAC_MR1_RFS_16K;
496 		break;
497 	case 4096:
498 		ret |= EMAC_MR1_RFS_4K;
499 		break;
500 	default:
501 		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
502 		       dev->ndev->name, rx_size);
503 	}
504 
505 	return ret;
506 }
507 
508 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
509 {
510 	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
511 		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
512 
513 	DBG2(dev, "__emac4_calc_base_mr1" NL);
514 
515 	switch(tx_size) {
516 	case 16384:
517 		ret |= EMAC4_MR1_TFS_16K;
518 		break;
519 	case 8192:
520 		ret |= EMAC4_MR1_TFS_8K;
521 		break;
522 	case 4096:
523 		ret |= EMAC4_MR1_TFS_4K;
524 		break;
525 	case 2048:
526 		ret |= EMAC4_MR1_TFS_2K;
527 		break;
528 	default:
529 		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
530 		       dev->ndev->name, tx_size);
531 	}
532 
533 	switch(rx_size) {
534 	case 16384:
535 		ret |= EMAC4_MR1_RFS_16K;
536 		break;
537 	case 8192:
538 		ret |= EMAC4_MR1_RFS_8K;
539 		break;
540 	case 4096:
541 		ret |= EMAC4_MR1_RFS_4K;
542 		break;
543 	case 2048:
544 		ret |= EMAC4_MR1_RFS_2K;
545 		break;
546 	default:
547 		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
548 		       dev->ndev->name, rx_size);
549 	}
550 
551 	return ret;
552 }
553 
554 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
555 {
556 	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
557 		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
558 		__emac_calc_base_mr1(dev, tx_size, rx_size);
559 }
560 
561 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
562 {
563 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
564 		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
565 	else
566 		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
567 }
568 
569 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
570 				 unsigned int low, unsigned int high)
571 {
572 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
573 		return (low << 22) | ( (high & 0x3ff) << 6);
574 	else
575 		return (low << 23) | ( (high & 0x1ff) << 7);
576 }
577 
/* (Re)program the whole EMAC from current driver state: reset the core
 * (or force loopback when there is no link), then set MR1, the MAC
 * address, VLAN TPID, RX mode, FIFO thresholds, water marks, PAUSE
 * timer and interrupt sources.
 *
 * Returns 0 on success or -ETIMEDOUT if the core reset timed out.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	/* With no carrier, put the MAC in internal loopback instead of
	 * resetting it; with carrier, a failed reset aborts configuration.
	 */
	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	/* Propagate the negotiated speed to the bridge (RGMII/ZMII) glue */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
740 
741 static void emac_reinitialize(struct emac_instance *dev)
742 {
743 	DBG(dev, "reinitialize" NL);
744 
745 	emac_netif_stop(dev);
746 	if (!emac_configure(dev)) {
747 		emac_tx_enable(dev);
748 		emac_rx_enable(dev);
749 	}
750 	emac_netif_start(dev);
751 }
752 
/* Fully reset the TX path: stop the transmitter and its MAL channel,
 * drop everything still queued in the TX ring, reprogram the EMAC and
 * restart both directions.  Callers in this file quiesce the netif
 * around this (see emac_reset_work()).
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;	/* empty ring */

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
768 
/* Workqueue handler scheduled from emac_tx_timeout(): performs a full
 * TX reset under link_lock, but only while the device is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
783 
784 static void emac_tx_timeout(struct net_device *ndev)
785 {
786 	struct emac_instance *dev = netdev_priv(ndev);
787 
788 	DBG(dev, "tx_timeout" NL);
789 
790 	schedule_work(&dev->reset_work);
791 }
792 
793 
794 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
795 {
796 	int done = !!(stacr & EMAC_STACR_OC);
797 
798 	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
799 		done = !done;
800 
801 	return done;
802 };
803 
/* Low-level MDIO read through this EMAC's management interface.
 *
 * Serialized by dev->mdio_lock.  Returns the 16-bit register value on
 * success, -ETIMEDOUT if the interface never became idle or the read
 * never completed, or -EREMOTEIO if the PHY flagged an error
 * (STACR[PHYE]).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;	/* inverted sense: set = not complete */
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from the final STACR value */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
874 
875 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
876 			      u16 val)
877 {
878 	struct emac_regs __iomem *p = dev->emacp;
879 	u32 r = 0;
880 	int n, err = -ETIMEDOUT;
881 
882 	mutex_lock(&dev->mdio_lock);
883 
884 	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
885 
886 	/* Enable proper MDIO port */
887 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
888 		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
889 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
890 		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
891 
892 	/* Wait for management interface to be idle */
893 	n = 20;
894 	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
895 		udelay(1);
896 		if (!--n) {
897 			DBG2(dev, " -> timeout wait idle\n");
898 			goto bail;
899 		}
900 	}
901 
902 	/* Issue write command */
903 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
904 		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
905 	else
906 		r = EMAC_STACR_BASE(dev->opb_bus_freq);
907 	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
908 		r |= EMAC_STACR_OC;
909 	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
910 		r |= EMACX_STACR_STAC_WRITE;
911 	else
912 		r |= EMAC_STACR_STAC_WRITE;
913 	r |= (reg & EMAC_STACR_PRA_MASK) |
914 		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
915 		(val << EMAC_STACR_PHYD_SHIFT);
916 	out_be32(&p->stacr, r);
917 
918 	/* Wait for write to complete */
919 	n = 200;
920 	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
921 		udelay(1);
922 		if (!--n) {
923 			DBG2(dev, " -> timeout wait complete\n");
924 			goto bail;
925 		}
926 	}
927 	err = 0;
928  bail:
929 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
930 		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
931 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
932 		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
933 	mutex_unlock(&dev->mdio_lock);
934 }
935 
936 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
937 {
938 	struct emac_instance *dev = netdev_priv(ndev);
939 	int res;
940 
941 	res = __emac_mdio_read((dev->mdio_instance &&
942 				dev->phy.gpcs_address != id) ?
943 				dev->mdio_instance : dev,
944 			       (u8) id, (u8) reg);
945 	return res;
946 }
947 
948 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
949 {
950 	struct emac_instance *dev = netdev_priv(ndev);
951 
952 	__emac_mdio_write((dev->mdio_instance &&
953 			   dev->phy.gpcs_address != id) ?
954 			   dev->mdio_instance : dev,
955 			  (u8) id, (u8) reg, (u16) val);
956 }
957 
/* Tx lock BH */
/* Apply the current interface flags / multicast list to the hardware:
 * reprogram the RX mode register (and the GAHT hash, if hash-filtered
 * multicast is in use) while the RX channel is briefly stopped.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
990 
/* Tx lock BH
 *
 * ndo_set_rx_mode hook.  When the chip cannot change its multicast
 * filter on the fly (no_mcast), only record that an update is pending;
 * presumably it is applied on a later reconfiguration -- see the other
 * users of mcast_pending.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	/* The stack only calls this while the interface is up */
	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}
1009 
/* ndo_set_mac_address: validate and program a new station address into
 * the IAHR/IALR individual-address registers, with RX and TX briefly
 * disabled around the register update.
 *
 * NOTE(review): the MMIO writes happen even when the interface is
 * down; this assumes the EMAC block is clocked/accessible then --
 * confirm on platforms that gate EMAC clocks while closed.
 */
static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
	       return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	/* IAHR holds the two high bytes, IALR the remaining four */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}
1036 
/* Re-initialize the RX ring for a new MTU (called from emac_change_mtu
 * while the interface is running).
 *
 * Stops the netif and the RX channel, drops in-flight and partially
 * assembled packets, marks every BD EMPTY, and reallocates skbs only
 * when the new MTU needs bigger buffers.  On -ENOMEM the ring holds a
 * mix of old and new skbs, but every BD is already EMPTY, so RX can
 * still be restarted safely before returning the error.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* A partially reassembled scatter/gather packet is lost */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header of received frames word-aligned */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1119 
1120 /* Process ctx, rtnl_lock semaphore */
1121 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1122 {
1123 	struct emac_instance *dev = netdev_priv(ndev);
1124 	int ret = 0;
1125 
1126 	DBG(dev, "change_mtu(%d)" NL, new_mtu);
1127 
1128 	if (netif_running(ndev)) {
1129 		/* Check if we really need to reinitialize RX ring */
1130 		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1131 			ret = emac_resize_rx_ring(dev, new_mtu);
1132 	}
1133 
1134 	if (!ret) {
1135 		ndev->mtu = new_mtu;
1136 		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1137 		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1138 	}
1139 
1140 	return ret;
1141 }
1142 
1143 static void emac_clean_tx_ring(struct emac_instance *dev)
1144 {
1145 	int i;
1146 
1147 	for (i = 0; i < NUM_TX_BUFF; ++i) {
1148 		if (dev->tx_skb[i]) {
1149 			dev_kfree_skb(dev->tx_skb[i]);
1150 			dev->tx_skb[i] = NULL;
1151 			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1152 				++dev->estats.tx_dropped;
1153 		}
1154 		dev->tx_desc[i].ctrl = 0;
1155 		dev->tx_desc[i].data_ptr = 0;
1156 	}
1157 }
1158 
1159 static void emac_clean_rx_ring(struct emac_instance *dev)
1160 {
1161 	int i;
1162 
1163 	for (i = 0; i < NUM_RX_BUFF; ++i)
1164 		if (dev->rx_skb[i]) {
1165 			dev->rx_desc[i].ctrl = 0;
1166 			dev_kfree_skb(dev->rx_skb[i]);
1167 			dev->rx_skb[i] = NULL;
1168 			dev->rx_desc[i].data_ptr = 0;
1169 		}
1170 
1171 	if (dev->rx_sg_skb) {
1172 		dev_kfree_skb(dev->rx_sg_skb);
1173 		dev->rx_sg_skb = NULL;
1174 	}
1175 }
1176 
/* Allocate and DMA-map a fresh skb for RX ring slot "slot".
 *
 * Data is offset by 2 bytes so the IP header of received frames ends
 * up word-aligned.  The wmb() orders the descriptor setup before the
 * BD is handed to the hardware via MAL_RX_CTRL_EMPTY.  Returns 0 or
 * -ENOMEM.  (The mapping is intentionally never unmapped -- see the
 * comment at the top of this file.)
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1197 
1198 static void emac_print_link_status(struct emac_instance *dev)
1199 {
1200 	if (netif_carrier_ok(dev->ndev))
1201 		printk(KERN_INFO "%s: link is up, %d %s%s\n",
1202 		       dev->ndev->name, dev->phy.speed,
1203 		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1204 		       dev->phy.pause ? ", pause enabled" :
1205 		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1206 	else
1207 		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1208 }
1209 
/* Process ctx, rtnl_lock semaphore
 *
 * ndo_open: request the error IRQ, populate the RX ring, start PHY
 * link polling (when a PHY is attached), then enable the MAL channels
 * and the EMAC itself.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Publish link_polling before the work can observe it */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1283 
/* BHs disabled */
#if 0
/* Currently compiled out (kept for reference): decode speed, duplex
 * and pause settings latched in the MR1 register and report whether
 * they differ from the cached PHY state.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1316 
/* Periodic PHY link poll (delayed work).
 *
 * Re-arms itself at PHY_POLL_LINK_ON/OFF intervals while the device is
 * open.  A down->up transition does a full TX reset to reprogram the
 * EMAC for the new link parameters; an up->down transition disables TX
 * and reinitializes the chip.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed; do not re-arm */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1357 
/* Force the polling work to re-evaluate and reprogram the link.
 *
 * The carrier is dropped first so the poll path sees a down->up
 * transition and reconfigures the EMAC.  link_polling is re-checked
 * after the sync cancel; presumably this guards against a concurrent
 * emac_close() having cleared it, in which case the work must not be
 * re-armed -- verify against the close path.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1368 
/* Process ctx, rtnl_lock semaphore
 *
 * ndo_stop: stop link polling, quiesce the EMAC and the MAL channels,
 * free the ring buffers and release the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		/* Clear the flag first so the work does not re-arm itself */
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1400 
1401 static inline u16 emac_tx_csum(struct emac_instance *dev,
1402 			       struct sk_buff *skb)
1403 {
1404 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1405 		(skb->ip_summed == CHECKSUM_PARTIAL)) {
1406 		++dev->stats.tx_packets_csum;
1407 		return EMAC_TX_CTRL_TAH_CSUM;
1408 	}
1409 	return 0;
1410 }
1411 
/* Common tail of both xmit paths: kick the transmitter through TMR0,
 * stop the queue when the TX ring just became full, and account the
 * packet.  Always returns NETDEV_TX_OK -- the BD is already owned by
 * the hardware when this runs.
 */
static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	netif_trans_update(ndev);
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1437 
/* Tx lock BH
 *
 * ndo_start_xmit (non-SG path): the whole skb goes into a single TX
 * BD.  The wmb() orders the descriptor fields before the ctrl write
 * that hands the BD (MAL_TX_CTRL_READY) to the hardware.  The DMA
 * mapping is intentionally never unmapped -- see the comment at the
 * top of this file.
 */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1466 
/* Fill TX BDs after "slot" with MAL_MAX_TX_SIZE-sized chunks of the
 * buffer at DMA address "pd", length "len".  base_ctrl already carries
 * MAL_TX_CTRL_READY from the caller; LAST and WRAP are added where
 * appropriate.  dev->tx_cnt is bumped per chunk, so the caller must
 * have verified there is enough room.  Returns the last slot used.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1495 
/* Tx lock BH disabled (SG version for TAH equipped EMACs)
 *
 * Scatter/gather ndo_start_xmit: the linear part plus each page
 * fragment is split into MAL_MAX_TX_SIZE chunks.  The first BD's ctrl
 * (carrying READY) is written last, after a wmb(), so the hardware
 * only ever sees a fully built chain.  If the initial free-slot
 * estimate turns out too optimistic mid-frame, everything is undone
 * and NETDEV_TX_BUSY is returned with the queue stopped.
 */
static netdev_tx_t
emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1580 
/* Tx lock BHs
 *
 * Decode a bad TX BD status word into the per-condition error counters.
 */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
1608 
/* Reap completed TX BDs (MAL poll_tx callback).
 *
 * Runs under the netif TX lock with BHs off; frees the skbs attached
 * to BDs the hardware has cleared READY on, accounts BD-level errors,
 * and wakes the queue once enough room is available again.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* SG chains attach the skb only to the last BD */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1654 
/* Hand an RX slot's existing skb back to the hardware in place.
 *
 * When data was received into it (len != 0) the buffer is DMA-mapped
 * again so the device regains ownership of the touched cache lines
 * (mappings are intentionally never unmapped -- see top of file).  The
 * wmb() orders the descriptor reset before the EMPTY handoff.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1671 
/* Decode a bad RX BD status word into the per-condition error counters. */
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
1698 
/* Mark an RX skb's checksum as verified by the TAH.  Callers pass ctrl
 * already masked with EMAC_BAD_RX_MASK, so a zero value with a TAH
 * present means the hardware reported no checksum error.
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1709 
/* Append BD "slot"'s data to the scatter/gather skb under assembly.
 *
 * Returns 0 on success; -1 when no packet is being assembled or the
 * result would exceed rx_skb_size (the partial packet is then dropped
 * and counted as rx_dropped_mtu).  In every case the slot's skb is
 * recycled back to the ring.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1731 
/* NAPI poll context
 *
 * Main RX path: consume up to "budget" BDs.  Small frames are copied
 * into a freshly allocated skb so the ring buffer can be recycled;
 * larger ones are handed to the stack directly and replaced via
 * emac_alloc_rx_skb().  Multi-BD (scatter/gather) frames are
 * reassembled through dev->rx_sg_skb.  If RX was stopped (e.g. after a
 * descriptor error) and budget remains, any stragglers are drained and
 * the channel is restarted from slot 0.  Returns the number of BDs
 * processed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after the ctrl word said the BD is done */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Copy small frames; keep the ring skb in place */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1861 
1862 /* NAPI poll context */
1863 static int emac_peek_rx(void *param)
1864 {
1865 	struct emac_instance *dev = param;
1866 
1867 	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1868 }
1869 
1870 /* NAPI poll context */
1871 static int emac_peek_rx_sg(void *param)
1872 {
1873 	struct emac_instance *dev = param;
1874 
1875 	int slot = dev->rx_slot;
1876 	while (1) {
1877 		u16 ctrl = dev->rx_desc[slot].ctrl;
1878 		if (ctrl & MAL_RX_CTRL_EMPTY)
1879 			return 0;
1880 		else if (ctrl & MAL_RX_CTRL_LAST)
1881 			return 1;
1882 
1883 		slot = (slot + 1) % NUM_RX_BUFF;
1884 
1885 		/* I'm just being paranoid here :) */
1886 		if (unlikely(slot == dev->rx_slot))
1887 			return 0;
1888 	}
1889 }
1890 
1891 /* Hard IRQ */
1892 static void emac_rxde(void *param)
1893 {
1894 	struct emac_instance *dev = param;
1895 
1896 	++dev->estats.rx_stopped;
1897 	emac_rx_disable_async(dev);
1898 }
1899 
/* Hard IRQ
 *
 * EMAC error interrupt: latch and acknowledge ISR, then bump the
 * matching error counters under dev->lock.  No data-path work happens
 * here -- RX/TX are driven through the MAL callbacks.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Reading then writing ISR back acknowledges the events */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1950 
/* ndo_get_stats: fold the driver's detailed counters into the legacy
 * net_device_stats, taking dev->lock so the snapshot is consistent
 * with the IRQ-updated error counters.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &ndev->stats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
2003 
/* MAL callbacks for EMACs whose frames always fit a single RX BD */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
2010 
/* MAL callbacks for EMACs that may receive multi-BD (scatter/gather)
 * frames -- only peek_rx differs from emac_commac_ops.
 */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
2017 
/* Ethtool support */

/* ethtool get_link_ksettings: report the cached PHY link state.
 * link_lock serializes against the link polling work updating dev->phy.
 */
static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
					   struct ethtool_link_ksettings *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = dev->phy.features;
	cmd->base.port = PORT_MII;
	cmd->base.phy_address = dev->phy.address;

	mutex_lock(&dev->link_lock);
	advertising = dev->phy.advertising;
	cmd->base.autoneg = dev->phy.autoneg;
	cmd->base.speed = dev->phy.speed;
	cmd->base.duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
2043 
/* ethtool set_link_ksettings: validate the request against the PHY's
 * feature mask, apply it as either a forced speed/duplex or a new
 * autoneg advertisement (preserving the current pause bits), then
 * force a link re-evaluation.
 */
static int
emac_ethtool_set_link_ksettings(struct net_device *ndev,
				const struct ethtool_link_ksettings *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;
	if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the speed/duplex pair must be supported */
		switch (cmd->base.speed) {
		case SPEED_10:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
						cmd->base.duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
2120 
2121 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2122 				       struct ethtool_ringparam *rp)
2123 {
2124 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2125 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2126 }
2127 
/* ethtool: report flow-control state derived from the negotiated
 * (or forced) PHY parameters, sampled under link_lock.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	/* Pause autoneg is reported as on when the PHY autonegotiates
	 * and advertises some form of pause.
	 */
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	/* Pause is only meaningful in full duplex */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
2146 
2147 static int emac_get_regs_len(struct emac_instance *dev)
2148 {
2149 		return sizeof(struct emac_ethtool_regs_subhdr) +
2150 			sizeof(struct emac_regs);
2151 }
2152 
/* ethtool: total register-dump size — global header, EMAC and MAL
 * blocks, plus one block per optional bridge (ZMII/RGMII/TAH) present.
 * Must stay in sync with the layout written by emac_ethtool_get_regs().
 */
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
2169 
/* Write this EMAC's subheader + raw registers into @buf and return a
 * pointer just past the written data (for chaining the next block).
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	/* Version tag lets userspace decode the register layout */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
	} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
	}
	memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
	return (void *)(hdr + 1) + sizeof(struct emac_regs);
}
2185 
/* ethtool: fill @buf with the register dump whose size was advertised
 * by emac_ethtool_get_regs_len().  The header's component bitmask
 * records which optional bridge blocks follow.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Each dump helper returns the position for the next block */
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2210 
/* ethtool: restart autonegotiation with the currently advertised modes.
 * Fails with -EOPNOTSUPP in PHY-less setups and -EINVAL when autoneg
 * is disabled.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	/* Kick the link timer even on failure (cheap and harmless) */
	emac_force_link_update(dev);
	return res;
}
2233 
2234 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2235 {
2236 	if (stringset == ETH_SS_STATS)
2237 		return EMAC_ETHTOOL_STATS_COUNT;
2238 	else
2239 		return -EINVAL;
2240 }
2241 
/* ethtool: copy out the statistics key names for ETH_SS_STATS */
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
2248 
/* ethtool: export statistics as a flat u64 array — dev->stats first,
 * dev->estats immediately after.  The ordering must match
 * emac_stats_keys (and EMAC_ETHTOOL_STATS_COUNT).
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* advance by the number of u64 slots dev->stats occupies */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2259 
/* ethtool: driver identification; bus_info encodes the cell index and
 * the device-tree node path.
 */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
		 dev->cell_index, dev->ofdev->dev.of_node);
}
2270 
/* ethtool operations table for all EMAC variants */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_link_ksettings = emac_ethtool_get_link_ksettings,
	.set_link_ksettings = emac_ethtool_set_link_ksettings,
};
2290 
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 *
 * Note that reads and writes always target the attached PHY at
 * dev->phy.address; the phy_id supplied by userspace is ignored.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2318 
/* One dependency slot tracked while waiting for companion devices
 * (MAL, ZMII, RGMII, TAH, MDIO, previous EMAC) to be probed.
 */
struct emac_depentry {
	u32			phandle;	/* DT phandle, 0 = not needed */
	struct device_node	*node;		/* resolved DT node */
	struct platform_device	*ofdev;		/* bound platform device */
	void			*drvdata;	/* set once its driver probed */
};
2325 
/* Indices into the emac_depentry array used by emac_check_deps() and
 * emac_wait_deps().
 */
#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5	/* previous EMAC in the boot list */
#define	EMAC_DEP_COUNT		6
2333 
/* Try to resolve every dependency slot one step further
 * (phandle -> node -> platform device -> driver data).
 *
 * Called repeatedly from the wait_event loop in emac_wait_deps();
 * returns non-zero once all EMAC_DEP_COUNT slots are satisfied.
 * Node/device references taken here are released by emac_wait_deps().
 */
static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2372 
/* Drop the device references taken over from emac_wait_deps().
 * NOTE(review): slots that were never needed are NULL here — assumes
 * of_dev_put() tolerates NULL; confirm against the OF core.
 */
static void emac_put_deps(struct emac_instance *dev)
{
	of_dev_put(dev->mal_dev);
	of_dev_put(dev->zmii_dev);
	of_dev_put(dev->rgmii_dev);
	of_dev_put(dev->mdio_dev);
	of_dev_put(dev->tah_dev);
}
2381 
/* Platform-bus notifier: re-run the dependency check whenever a driver
 * binds to any device, by waking everyone blocked in emac_wait_deps().
 */
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	/* We are only interested in driver-bind events */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2390 
/* Registered on the platform bus only for the duration of emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
2394 
/* Block (up to EMAC_PROBE_DEP_TIMEOUT) until all companion devices have
 * been probed.  On success the resolved platform devices are handed
 * over to @dev (freed later by emac_put_deps()); on timeout every
 * reference is dropped and -ENODEV is returned.
 */
static int emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Dummy phandle marks "wait for the previous EMAC in boot order" */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		of_node_put(deps[i].node);
		/* on failure nobody keeps the device references either */
		if (err)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* the "previous EMAC" reference is only needed for ordering */
	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2432 
2433 static int emac_read_uint_prop(struct device_node *np, const char *name,
2434 			       u32 *val, int fatal)
2435 {
2436 	int len;
2437 	const u32 *prop = of_get_property(np, name, &len);
2438 	if (prop == NULL || len < sizeof(u32)) {
2439 		if (fatal)
2440 			printk(KERN_ERR "%pOF: missing %s property\n",
2441 			       np, name);
2442 		return -ENODEV;
2443 	}
2444 	*val = *prop;
2445 	return 0;
2446 }
2447 
/* phylib link-change callback: mirror the phy_device state into the
 * driver's legacy mii_phy structure.
 */
static void emac_adjust_link(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct phy_device *phy = dev->phy_dev;

	dev->phy.autoneg = phy->autoneg;
	dev->phy.speed = phy->speed;
	dev->phy.duplex = phy->duplex;
	dev->phy.pause = phy->pause;
	dev->phy.asym_pause = phy->asym_pause;
	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
						phy->advertising);
}
2461 
2462 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2463 {
2464 	int ret = emac_mdio_read(bus->priv, addr, regnum);
2465 	/* This is a workaround for powered down ports/phys.
2466 	 * In the wild, this was seen on the Cisco Meraki MX60(W).
2467 	 * This hardware disables ports as part of the handoff
2468 	 * procedure. Accessing the ports will lead to errors
2469 	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2470 	 */
2471 	return ret < 0 ? 0xffff : ret;
2472 }
2473 
/* mii_bus write hook; emac_mdio_write() errors are deliberately
 * discarded (see the read-side workaround above).
 */
static int emac_mii_bus_write(struct mii_bus *bus, int addr,
			      int regnum, u16 val)
{
	emac_mdio_write(bus->priv, addr, regnum, val);
	return 0;
}
2480 
/* mii_bus reset hook: reset the whole EMAC (bus->priv is the netdev) */
static int emac_mii_bus_reset(struct mii_bus *bus)
{
	struct emac_instance *dev = netdev_priv(bus->priv);

	return emac_reset(dev);
}
2487 
/* Push the mii_phy settings into the phylib phy_device and (re)start
 * autonegotiation.  Shared by the aneg and forced setup paths.
 */
static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
				    struct phy_device *phy_dev)
{
	phy_dev->autoneg = phy->autoneg;
	phy_dev->speed = phy->speed;
	phy_dev->duplex = phy->duplex;
	ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
						phy->advertising);
	return phy_start_aneg(phy_dev);
}
2498 
/* mii_phy op: enable autonegotiation with the given advertisement mask */
static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy->autoneg = AUTONEG_ENABLE;
	phy->advertising = advertise;
	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
}
2508 
/* mii_phy op: force speed/duplex.  Despite the helper's name,
 * phy_start_aneg() with autoneg disabled programs the forced mode.
 */
static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy->autoneg = AUTONEG_DISABLE;
	phy->speed = speed;
	phy->duplex = fd;
	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
}
2519 
/* mii_phy op: poll link state via phylib.  If the status read fails,
 * fall back to the netdev's last known carrier state.
 */
static int emac_mdio_poll_link(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = phy_read_status(dev->phy_dev);
	if (res) {
		dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
		return ethtool_op_get_link(ndev);
	}

	return dev->phy_dev->link;
}
2534 
/* mii_phy op: refresh speed/duplex/pause from the phylib device.
 * Returns 0 on success or the phy_read_status() error.
 */
static int emac_mdio_read_link(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);
	struct phy_device *phy_dev = dev->phy_dev;
	int res;

	res = phy_read_status(phy_dev);
	if (res)
		return res;

	phy->speed = phy_dev->speed;
	phy->duplex = phy_dev->duplex;
	phy->pause = phy_dev->pause;
	phy->asym_pause = phy_dev->asym_pause;
	return 0;
}
2552 
/* mii_phy op: start the phylib state machine and (re)initialize the
 * PHY hardware.
 */
static int emac_mdio_init_phy(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy_start(dev->phy_dev);
	return phy_init_hw(dev->phy_dev);
}
2561 
/* mii_phy ops used when the PHY is driven through phylib (DT phy-handle
 * or fixed-link configurations).
 */
static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
	.init		= emac_mdio_init_phy,
	.setup_aneg	= emac_mdio_setup_aneg,
	.setup_forced	= emac_mdio_setup_forced,
	.poll_link	= emac_mdio_poll_link,
	.read_link	= emac_mdio_read_link,
};
2569 
/* Register an MDIO bus described by the "mdio" child node of this EMAC.
 *
 * On success dev->mii_bus is registered (devm-allocated, so only the
 * registration must be undone by callers on later failure).
 * Returns 0 or a negative errno.
 */
static int emac_dt_mdio_probe(struct emac_instance *dev)
{
	struct device_node *mii_np;
	int res;

	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
	if (!mii_np) {
		dev_err(&dev->ofdev->dev, "no mdio definition found.");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		res = -ENODEV;
		goto put_node;
	}

	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
	if (!dev->mii_bus) {
		res = -ENOMEM;
		goto put_node;
	}

	/* bus->priv carries the netdev for the read/write/reset hooks */
	dev->mii_bus->priv = dev->ndev;
	dev->mii_bus->parent = dev->ndev->dev.parent;
	dev->mii_bus->name = "emac_mdio";
	dev->mii_bus->read = &emac_mii_bus_read;
	dev->mii_bus->write = &emac_mii_bus_write;
	dev->mii_bus->reset = &emac_mii_bus_reset;
	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
	res = of_mdiobus_register(dev->mii_bus, mii_np);
	if (res) {
		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
			dev->mii_bus->name, res);
	}

 put_node:
	of_node_put(mii_np);
	return res;
}
2609 
/* Connect to the PHY referenced by @phy_handle via phylib and populate
 * the legacy mii_phy descriptor from the attached phy_device/driver.
 * Returns 0, -ENOMEM, or -ENODEV when the connect fails.
 */
static int emac_dt_phy_connect(struct emac_instance *dev,
			       struct device_node *phy_handle)
{
	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
				    GFP_KERNEL);
	if (!dev->phy.def)
		return -ENOMEM;

	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
				      0, dev->phy_mode);
	if (!dev->phy_dev) {
		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
		return -ENODEV;
	}

	/* Mirror the bound phylib driver's identity into the mii_phy def */
	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
	dev->phy.def->name = dev->phy_dev->drv->name;
	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
						dev->phy_dev->supported);
	dev->phy.address = dev->phy_dev->mdio.addr;
	dev->phy.mode = dev->phy_dev->interface;
	return 0;
}
2635 
/* Attach a device-tree described PHY, if any.
 *
 * Returns:
 *   1  - no "phy-handle" property; caller falls back to legacy probing
 *   0  - MDIO bus registered and PHY connected
 *  <0  - error (MDIO bus already unregistered on connect failure)
 */
static int emac_dt_phy_probe(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct device_node *phy_handle;
	int res = 1;

	phy_handle = of_parse_phandle(np, "phy-handle", 0);

	if (phy_handle) {
		res = emac_dt_mdio_probe(dev);
		if (!res) {
			res = emac_dt_phy_connect(dev, phy_handle);
			if (res)
				mdiobus_unregister(dev->mii_bus);
		}
	}

	/* of_node_put() is a no-op for the NULL (no phy-handle) case */
	of_node_put(phy_handle);
	return res;
}
2656 
/* Locate, attach and initialize the PHY for this EMAC.
 *
 * Handles, in order: PHY-less and fixed-link setups, device-tree
 * phy-handle attachment (RGMII only), and finally the legacy scan of
 * the MII address space guided by phy-map/phy-address.
 * Returns 0 on success or a negative errno.
 */
static int emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration. */
	if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
	    of_phy_is_fixed_link(np)) {
		emac_reset(dev);

		/* PHY-less configuration. */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		if (of_phy_is_fixed_link(np)) {
			int res = emac_dt_mdio_probe(dev);

			if (res)
				return res;

			res = of_phy_register_fixed_link(np);
			dev->phy_dev = of_phy_find_device(np);
			if (res || !dev->phy_dev) {
				mdiobus_unregister(dev->mii_bus);
				return res ? res : -EINVAL;
			}
			emac_adjust_link(dev->ndev);
			/* drop the extra ref of_phy_find_device() took */
			put_device(&dev->phy_dev->mdio.dev);
		}
		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX
	 * NOTE(review): this repeats the identical dcri_clrset() above —
	 * presumably to re-assert the clock after emac_rx_clk_tx(); confirm.
	 */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		int res = emac_dt_phy_probe(dev);

		switch (res) {
		case 1:
			/* No phy-handle property configured.
			 * Continue with the existing phy probe
			 * and setup code.
			 */
			break;

		case 0:
			mutex_unlock(&emac_phy_map_lock);
			goto init_phy;

		default:
			mutex_unlock(&emac_phy_map_lock);
			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
				res);
			return res;
		}
	}

	/* An explicit phy-address restricts the scan to that one address */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%pOF: can't find PHY!\n", np);
		return -ENXIO;
	}

 init_phy:
	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;
	dev->phy.features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2828 
/* Parse the EMAC device-tree node into @dev: MAL wiring, FIFO sizes,
 * PHY addressing, companion-device phandles, feature bits, and the
 * MAC address.  Missing optional properties fall back to defaults;
 * missing mandatory ones return -ENXIO.
 */
static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = ETH_DATA_LEN;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	/* 0xffffffff means "not specified" for the addressing properties */
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_INTERFACE_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%pOF: Flow control not disabled!\n",
					np);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%pOF: TAH support not enabled !\n", np);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%pOF: ZMII support not enabled !\n", np);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%pOF: RGMII support not enabled !\n", np);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
		       np);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	/* This should never happen */
	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
		return -ENXIO;

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
2988 
/* netdev ops for non-gigabit EMACs (no scatter/gather TX, fixed MTU) */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= emac_set_mac_address,
	.ndo_start_xmit		= emac_start_xmit,
};
3000 
/* netdev ops for gigabit-capable EMACs: scatter/gather-aware transmit
 * and a changeable MTU on top of the base set.
 */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= emac_set_mac_address,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
3013 
/* Probe one EMAC instance: parse its device-tree configuration, map its
 * IRQs and registers, register with the MAL and the optional ZMII/RGMII/TAH
 * helper cells, find the PHY and finally register the net_device.
 *
 * On failure, the goto ladder at the bottom unwinds exactly the steps that
 * already succeeded, in reverse order of setup.
 */
static int emac_probe(struct platform_device *ofdev)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACS.  We leave the check for an unused
	 * property here for now, but new flat device trees should set a
	 * status property to "disabled" instead.
	 */
	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev)
		goto err_gone;

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (!dev->emac_irq) {
		printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
		err = -ENODEV;
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	// TODO : platform_get_resource() and devm_ioremap_resource()
	dev->emacp = of_iomap(np, 0);
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices (MAL and optional ZMII/RGMII/TAH/MDIO
	 * platform devices) to finish their own probes.
	 */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%pOF: Timeout waiting for dependent devices\n", np);
		/*  display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = platform_get_drvdata(dev->mal_dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n",
		       np, dev->mal_dev->dev.of_node);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Some SoCs like APM821xx does not support Half Duplex mode. */
	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_100baseT_Half |
				     SUPPORTED_10baseT_Half);
	}

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* Checksum offload and scatter/gather need the TAH cell */
	if (dev->tah_dev) {
		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
	}
	ndev->watchdog_timeo = 5 * HZ;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->netdev_ops = &emac_gige_netdev_ops;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &emac_ethtool_ops;

	/* MTU range: 46 - 1500 or whatever is in OF */
	ndev->min_mtu = EMAC_MIN_MTU;
	ndev->max_mtu = dev->max_mtu;

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
		       np, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	platform_set_drvdata(ofdev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);


	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
	       ndev->name, dev->cell_index, np, ndev->dev_addr);

	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

	/* Error unwind: labels fall through so each one also performs all the
	 * cleanups below it, mirroring the setup order above in reverse.
	 */
 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	free_netdev(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
3235 
/* Teardown counterpart of emac_probe(): unregister the net_device first so
 * no new traffic or ioctls can arrive, then detach from the helper cells
 * and the MAL, and finally release PHY, MMIO and IRQ resources.
 */
static int emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = platform_get_drvdata(ofdev);

	DBG(dev, "remove" NL);

	unregister_netdev(dev->ndev);

	/* Make sure no asynchronous reset is still pending or running */
	cancel_work_sync(&dev->reset_work);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	if (dev->phy_dev)
		phy_disconnect(dev->phy_dev);

	if (dev->mii_bus)
		mdiobus_unregister(dev->mii_bus);

	/* Release our PHY address in the shared busy map so another EMAC
	 * instance may claim it.
	 */
	busy_phy_map &= ~(1 << dev->phy.address);
	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	iounmap(dev->emacp);

	if (dev->wol_irq)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
3276 
/* Device-tree match table: all three EMAC generations are handled by this
 * one driver; per-version features are sorted out in emac_init_config().
 * XXX Features in here should be replaced by properties...
 */
static const struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
3295 
/* Platform driver glue; registered from emac_init() after the MAL and
 * ZMII/RGMII/TAH submodules are up.
 */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3304 
3305 static void __init emac_make_bootlist(void)
3306 {
3307 	struct device_node *np = NULL;
3308 	int j, max, i = 0;
3309 	int cell_indices[EMAC_BOOT_LIST_SIZE];
3310 
3311 	/* Collect EMACs */
3312 	while((np = of_find_all_nodes(np)) != NULL) {
3313 		const u32 *idx;
3314 
3315 		if (of_match_node(emac_match, np) == NULL)
3316 			continue;
3317 		if (of_get_property(np, "unused", NULL))
3318 			continue;
3319 		idx = of_get_property(np, "cell-index", NULL);
3320 		if (idx == NULL)
3321 			continue;
3322 		cell_indices[i] = *idx;
3323 		emac_boot_list[i++] = of_node_get(np);
3324 		if (i >= EMAC_BOOT_LIST_SIZE) {
3325 			of_node_put(np);
3326 			break;
3327 		}
3328 	}
3329 	max = i;
3330 
3331 	/* Bubble sort them (doh, what a creative algorithm :-) */
3332 	for (i = 0; max > 1 && (i < (max - 1)); i++)
3333 		for (j = i; j < max; j++) {
3334 			if (cell_indices[i] > cell_indices[j]) {
3335 				swap(emac_boot_list[i], emac_boot_list[j]);
3336 				swap(cell_indices[i], cell_indices[j]);
3337 			}
3338 		}
3339 }
3340 
3341 static int __init emac_init(void)
3342 {
3343 	int rc;
3344 
3345 	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3346 
3347 	/* Build EMAC boot list */
3348 	emac_make_bootlist();
3349 
3350 	/* Init submodules */
3351 	rc = mal_init();
3352 	if (rc)
3353 		goto err;
3354 	rc = zmii_init();
3355 	if (rc)
3356 		goto err_mal;
3357 	rc = rgmii_init();
3358 	if (rc)
3359 		goto err_zmii;
3360 	rc = tah_init();
3361 	if (rc)
3362 		goto err_rgmii;
3363 	rc = platform_driver_register(&emac_driver);
3364 	if (rc)
3365 		goto err_tah;
3366 
3367 	return 0;
3368 
3369  err_tah:
3370 	tah_exit();
3371  err_rgmii:
3372 	rgmii_exit();
3373  err_zmii:
3374 	zmii_exit();
3375  err_mal:
3376 	mal_exit();
3377  err:
3378 	return rc;
3379 }
3380 
3381 static void __exit emac_exit(void)
3382 {
3383 	int i;
3384 
3385 	platform_driver_unregister(&emac_driver);
3386 
3387 	tah_exit();
3388 	rgmii_exit();
3389 	zmii_exit();
3390 	mal_exit();
3391 
3392 	/* Destroy EMAC boot list */
3393 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3394 		of_node_put(emac_boot_list[i]);
3395 }
3396 
3397 module_init(emac_init);
3398 module_exit(emac_exit);
3399