xref: /openbmc/linux/drivers/net/ethernet/ibm/emac/core.c (revision f7c35abe)
1 /*
2  * drivers/net/ethernet/ibm/emac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  * 	Matt Porter <mporter@kernel.crashing.org>
16  *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  * 	Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26 
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/of_mdio.h>
46 #include <linux/slab.h>
47 
48 #include <asm/processor.h>
49 #include <asm/io.h>
50 #include <asm/dma.h>
51 #include <linux/uaccess.h>
52 #include <asm/dcr.h>
53 #include <asm/dcr-regs.h>
54 
55 #include "core.h"
56 
57 /*
58  * Lack of dma_unmap_???? calls is intentional.
59  *
60  * API-correct usage requires additional support state information to be
61  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
62  * EMAC design (e.g. TX buffer passed from network stack can be split into
63  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
64  * maintaining such information will add additional overhead.
65  * Current DMA API implementation for 4xx processors only ensures cache coherency
66  * and dma_unmap_???? routines are empty and are likely to stay this way.
67  * I decided to omit dma_unmap_??? calls because I don't want to add additional
68  * complexity just for the sake of following some abstract API, when it doesn't
 69  * add any real benefit to the driver. I understand that this decision may be
70  * controversial, but I really tried to make code API-correct and efficient
71  * at the same time and didn't come up with code I liked :(.                --ebs
72  */
73 
74 #define DRV_NAME        "emac"
75 #define DRV_VERSION     "3.54"
76 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
77 
78 MODULE_DESCRIPTION(DRV_DESC);
79 MODULE_AUTHOR
80     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
81 MODULE_LICENSE("GPL");
82 
83 /* minimum number of free TX descriptors required to wake up TX process */
84 #define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
85 
86 /* If packet size is less than this number, we allocate small skb and copy packet
87  * contents into it instead of just sending original big skb up
88  */
89 #define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
90 
91 /* Since multiple EMACs share MDIO lines in various ways, we need
92  * to avoid re-using the same PHY ID in cases where the arch didn't
93  * setup precise phy_map entries
94  *
95  * XXX This is something that needs to be reworked as we can have multiple
96  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
97  * probably require in that case to have explicit PHY IDs in the device-tree
98  */
99 static u32 busy_phy_map;
100 static DEFINE_MUTEX(emac_phy_map_lock);
101 
102 /* This is the wait queue used to wait on any event related to probe, that
103  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
104  */
105 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
106 
107 /* Having stable interface names is a doomed idea. However, it would be nice
108  * if we didn't have completely random interface names at boot too :-) It's
109  * just a matter of making everybody's life easier. Since we are doing
110  * threaded probing, it's a bit harder though. The base idea here is that
111  * we make up a list of all emacs in the device-tree before we register the
112  * driver. Every emac will then wait for the previous one in the list to
113  * initialize before itself. We should also keep that list ordered by
114  * cell_index.
115  * That list is only 4 entries long, meaning that additional EMACs don't
116  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
117  */
118 
119 #define EMAC_BOOT_LIST_SIZE	4
120 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
121 
122 /* How long should I wait for dependent devices ? */
123 #define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
124 
125 /* I don't want to litter system log with timeout errors
126  * when we have brain-damaged PHY.
127  */
128 static inline void emac_report_timeout_error(struct emac_instance *dev,
129 					     const char *error)
130 {
131 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
132 				  EMAC_FTR_460EX_PHY_CLK_FIX |
133 				  EMAC_FTR_440EP_PHY_CLK_FIX))
134 		DBG(dev, "%s" NL, error);
135 	else if (net_ratelimit())
136 		printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
137 			error);
138 }
139 
140 /* EMAC PHY clock workaround:
141  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
142  * which allows controlling each EMAC clock
143  */
/* 440EP PHY clock workaround: set the SDR0_MFR_ECS bit for this EMAC's
 * cell index (switching its RX clock source — presumably to the TX/internal
 * clock given the name; confirm against the 440EP manual).  No-op unless
 * native DCR access is available and the 440EP clock fix is required.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
152 
/* Undo emac_rx_clk_tx(): clear the SDR0_MFR_ECS bit for this EMAC's cell
 * index, restoring the default RX clock selection.  No-op unless native
 * DCR access is available and the 440EP clock fix is required.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
161 
162 /* PHY polling intervals */
163 #define PHY_POLL_LINK_ON	HZ
164 #define PHY_POLL_LINK_OFF	(HZ / 5)
165 
166 /* Graceful stop timeouts in us.
167  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
168  */
169 #define STOP_TIMEOUT_10		1230
170 #define STOP_TIMEOUT_100	124
171 #define STOP_TIMEOUT_1000	13
172 #define STOP_TIMEOUT_1000_JUMBO	73
173 
174 static unsigned char default_mcast_addr[] = {
175 	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
176 };
177 
178 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
179 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
180 	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
181 	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
182 	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
183 	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
184 	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
185 	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
186 	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
187 	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
188 	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
189 	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
190 	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
191 	"tx_bd_excessive_collisions", "tx_bd_late_collision",
192 	"tx_bd_multple_collisions", "tx_bd_single_collision",
193 	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
194 	"tx_errors"
195 };
196 
197 static irqreturn_t emac_irq(int irq, void *dev_instance);
198 static void emac_clean_tx_ring(struct emac_instance *dev);
199 static void __emac_set_multicast_list(struct emac_instance *dev);
200 
201 static inline int emac_phy_supports_gige(int phy_mode)
202 {
203 	return  phy_mode == PHY_MODE_GMII ||
204 		phy_mode == PHY_MODE_RGMII ||
205 		phy_mode == PHY_MODE_SGMII ||
206 		phy_mode == PHY_MODE_TBI ||
207 		phy_mode == PHY_MODE_RTBI;
208 }
209 
210 static inline int emac_phy_gpcs(int phy_mode)
211 {
212 	return  phy_mode == PHY_MODE_SGMII ||
213 		phy_mode == PHY_MODE_TBI ||
214 		phy_mode == PHY_MODE_RTBI;
215 }
216 
/* Start the transmitter: set MR0[TXE] if it is not already set. */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
228 
/* Stop the transmitter: clear MR0[TXE], then busy-wait up to
 * dev->stop_timeout microseconds for MR0[TXI] to be set, indicating the
 * disable has completed.  Logs via emac_report_timeout_error() on timeout.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
248 
249 static void emac_rx_enable(struct emac_instance *dev)
250 {
251 	struct emac_regs __iomem *p = dev->emacp;
252 	u32 r;
253 
254 	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
255 		goto out;
256 
257 	DBG(dev, "rx_enable" NL);
258 
259 	r = in_be32(&p->mr0);
260 	if (!(r & EMAC_MR0_RXE)) {
261 		if (unlikely(!(r & EMAC_MR0_RXI))) {
262 			/* Wait if previous async disable is still in progress */
263 			int n = dev->stop_timeout;
264 			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
265 				udelay(1);
266 				--n;
267 			}
268 			if (unlikely(!n))
269 				emac_report_timeout_error(dev,
270 							  "RX disable timeout");
271 		}
272 		out_be32(&p->mr0, r | EMAC_MR0_RXE);
273 	}
274  out:
275 	;
276 }
277 
/* Stop the receiver: clear MR0[RXE], then busy-wait up to
 * dev->stop_timeout microseconds for MR0[RXI] to be set, indicating the
 * disable has completed.  Logs via emac_report_timeout_error() on timeout.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
297 
/* Quiesce the network interface: block multicast updates (no_mcast,
 * set under the TX/addr locks), refresh the TX timestamp so the stack
 * doesn't declare a TX timeout while we're stopped, then stop NAPI
 * polling and the TX queue.  Counterpart of emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
309 
/* Restart the network interface after emac_netif_stop(): re-allow
 * multicast updates (applying any update that arrived while stopped),
 * wake the TX queue and re-enable NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	/* Flush a multicast-list change deferred by emac_set_multicast_list() */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
329 
/* Asynchronous receiver stop: clear MR0[RXE] without waiting for
 * MR0[RXI].  A later emac_rx_enable() will wait for this disable to
 * actually complete before re-enabling.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
341 
/* Soft-reset the EMAC core.
 *
 * Stops the RX and TX channels first (40x erratum; skipped if a previous
 * reset already failed, since the disables would just time out again),
 * handles the 460EX/GT clock selection needed for the reset to complete,
 * then sets MR0[SRST] and polls until the core clears it.
 *
 * Returns 0 on success, -ETIMEDOUT if SRST never clears; dev->reset_failed
 * tracks the outcome for subsequent calls.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
404 
/* Program the EMAC group-address hash table (GAHT) filter from the
 * interface's current multicast list: hash each address to a slot,
 * accumulate the per-register masks in a scratch copy, then write the
 * whole table out in one pass.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	/* NOTE(review): variable-length array, sized per EMAC variant.
	 * Presumably small and bounded by the largest EMAC_XAHT_REGS()
	 * value — a fixed max-size array would avoid the VLA; confirm
	 * the maximum and consider converting.
	 */
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* CRC of the MAC address selects the hash-table slot */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
432 
433 static inline u32 emac_iff2rmr(struct net_device *ndev)
434 {
435 	struct emac_instance *dev = netdev_priv(ndev);
436 	u32 r;
437 
438 	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
439 
440 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
441 	    r |= EMAC4_RMR_BASE;
442 	else
443 	    r |= EMAC_RMR_BASE;
444 
445 	if (ndev->flags & IFF_PROMISC)
446 		r |= EMAC_RMR_PME;
447 	else if (ndev->flags & IFF_ALLMULTI ||
448 			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
449 		r |= EMAC_RMR_PMME;
450 	else if (!netdev_mc_empty(ndev))
451 		r |= EMAC_RMR_MAE;
452 
453 	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
454 		r &= ~EMAC4_RMR_MJS_MASK;
455 		r |= EMAC4_RMR_MJS(ndev->mtu);
456 	}
457 
458 	return r;
459 }
460 
461 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
462 {
463 	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
464 
465 	DBG2(dev, "__emac_calc_base_mr1" NL);
466 
467 	switch(tx_size) {
468 	case 2048:
469 		ret |= EMAC_MR1_TFS_2K;
470 		break;
471 	default:
472 		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
473 		       dev->ndev->name, tx_size);
474 	}
475 
476 	switch(rx_size) {
477 	case 16384:
478 		ret |= EMAC_MR1_RFS_16K;
479 		break;
480 	case 4096:
481 		ret |= EMAC_MR1_RFS_4K;
482 		break;
483 	default:
484 		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
485 		       dev->ndev->name, rx_size);
486 	}
487 
488 	return ret;
489 }
490 
491 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
492 {
493 	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
494 		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
495 
496 	DBG2(dev, "__emac4_calc_base_mr1" NL);
497 
498 	switch(tx_size) {
499 	case 16384:
500 		ret |= EMAC4_MR1_TFS_16K;
501 		break;
502 	case 4096:
503 		ret |= EMAC4_MR1_TFS_4K;
504 		break;
505 	case 2048:
506 		ret |= EMAC4_MR1_TFS_2K;
507 		break;
508 	default:
509 		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
510 		       dev->ndev->name, tx_size);
511 	}
512 
513 	switch(rx_size) {
514 	case 16384:
515 		ret |= EMAC4_MR1_RFS_16K;
516 		break;
517 	case 4096:
518 		ret |= EMAC4_MR1_RFS_4K;
519 		break;
520 	case 2048:
521 		ret |= EMAC4_MR1_RFS_2K;
522 		break;
523 	default:
524 		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
525 		       dev->ndev->name, rx_size);
526 	}
527 
528 	return ret;
529 }
530 
531 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
532 {
533 	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
534 		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
535 		__emac_calc_base_mr1(dev, tx_size, rx_size);
536 }
537 
538 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
539 {
540 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
541 		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
542 	else
543 		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
544 }
545 
546 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
547 				 unsigned int low, unsigned int high)
548 {
549 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
550 		return (low << 22) | ( (high & 0x3ff) << 6);
551 	else
552 		return (low << 23) | ( (high & 0x1ff) << 7);
553 }
554 
/* Program the whole EMAC from its current software state: reset (or
 * loopback setup when there is no link), MR1 (duplex/speed/FIFO sizes/
 * flow control), MAC address, VLAN TPID, RX mode, FIFO thresholds,
 * PAUSE timer and the IRQ enable mask.
 * Returns 0 on success, -ETIMEDOUT if the chip reset times out.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	/* No carrier: put the MAC in internal loopback instead of resetting */
	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
717 
/* Stop the interface, reprogram the EMAC and start it again.  If
 * emac_configure() fails, RX/TX stay disabled but the netif is still
 * restarted.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
729 
/* Full TX path recovery: disable the transmitter and its MAL channel,
 * drop everything still queued in the TX ring (resetting the ring
 * indices), reprogram the EMAC and re-enable both directions.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
745 
/* Workqueue handler scheduled from emac_tx_timeout(): perform the full
 * TX reset under link_lock, but only while the device is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
760 
/* TX watchdog timeout handler.  The recovery path takes a mutex and
 * busy-waits on hardware, so defer the actual reset to the workqueue.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
769 
770 
771 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
772 {
773 	int done = !!(stacr & EMAC_STACR_OC);
774 
775 	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
776 		done = !done;
777 
778 	return done;
779 };
780 
/* Perform one MDIO read of PHY @id, register @reg, through this EMAC's
 * STA controller.  Serialized by mdio_lock; claims the ZMII/RGMII MDIO
 * lines for the duration.  Returns the 16-bit register value on success,
 * -ETIMEDOUT if the interface never goes idle / never completes, or
 * -EREMOTEIO if the controller reports a PHY error.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from the completed STACR value */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the MDIO lines in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
851 
/* Perform one MDIO write of @val to PHY @id, register @reg, through this
 * EMAC's STA controller.  Serialized by mdio_lock; claims the ZMII/RGMII
 * MDIO lines for the duration.  Failures (interface never idle / write
 * never completes) are only logged via DBG2 — the function returns void,
 * so `err` is tracked but never reported to the caller.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the MDIO lines in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
912 
913 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
914 {
915 	struct emac_instance *dev = netdev_priv(ndev);
916 	int res;
917 
918 	res = __emac_mdio_read((dev->mdio_instance &&
919 				dev->phy.gpcs_address != id) ?
920 				dev->mdio_instance : dev,
921 			       (u8) id, (u8) reg);
922 	return res;
923 }
924 
925 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
926 {
927 	struct emac_instance *dev = netdev_priv(ndev);
928 
929 	__emac_mdio_write((dev->mdio_instance &&
930 			   dev->phy.gpcs_address != id) ?
931 			   dev->mdio_instance : dev,
932 			  (u8) id, (u8) reg, (u16) val);
933 }
934 
935 /* Tx lock BH */
/* Reprogram RMR (and the GAHT hash table if needed) for the current
 * multicast/promiscuous state.  Only the RX channel is stopped around
 * the update — see the rationale below.  Caller holds the TX lock (BH),
 * per the annotation above.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
967 
968 /* Tx lock BH */
/* ndo_set_rx_mode handler.  While the interface is quiesced
 * (dev->no_mcast set by emac_netif_stop()) the update is deferred via
 * mcast_pending and applied later by emac_netif_start(); otherwise it
 * is applied immediately under link_lock.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}
986 
/* ndo_set_mac_address handler: validate the new address, then quiesce
 * RX/TX around rewriting the individual-address registers (IAHR/IALR).
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid Ethernet address.
 */
static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
	       return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	/* IAHR holds the top 2 address bytes, IALR the remaining 4 */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}
1013 
/* Rebuild the RX ring for a new MTU.  Returns 0 on success or -ENOMEM
 * if new skbs could not be allocated; RX is restarted in either case
 * (on failure the caller keeps the old MTU, so the smaller buffers in
 * the untouched slots remain valid).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	/* Quiesce: stop netif queues, EMAC RX and the MAL RX channel
	 * before touching the ring.
	 */
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Drop any partially assembled scatter/gather packet */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* Same headroom / +2 offset scheme as emac_alloc_rx_skb() */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		/* mtu must be updated first; presumably the full reset
		 * reprograms MR1 from it — see emac_full_tx_reset().
		 */
		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1096 
1097 /* Process ctx, rtnl_lock semaphore */
1098 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1099 {
1100 	struct emac_instance *dev = netdev_priv(ndev);
1101 	int ret = 0;
1102 
1103 	DBG(dev, "change_mtu(%d)" NL, new_mtu);
1104 
1105 	if (netif_running(ndev)) {
1106 		/* Check if we really need to reinitialize RX ring */
1107 		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1108 			ret = emac_resize_rx_ring(dev, new_mtu);
1109 	}
1110 
1111 	if (!ret) {
1112 		ndev->mtu = new_mtu;
1113 		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1114 		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1115 	}
1116 
1117 	return ret;
1118 }
1119 
1120 static void emac_clean_tx_ring(struct emac_instance *dev)
1121 {
1122 	int i;
1123 
1124 	for (i = 0; i < NUM_TX_BUFF; ++i) {
1125 		if (dev->tx_skb[i]) {
1126 			dev_kfree_skb(dev->tx_skb[i]);
1127 			dev->tx_skb[i] = NULL;
1128 			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1129 				++dev->estats.tx_dropped;
1130 		}
1131 		dev->tx_desc[i].ctrl = 0;
1132 		dev->tx_desc[i].data_ptr = 0;
1133 	}
1134 }
1135 
1136 static void emac_clean_rx_ring(struct emac_instance *dev)
1137 {
1138 	int i;
1139 
1140 	for (i = 0; i < NUM_RX_BUFF; ++i)
1141 		if (dev->rx_skb[i]) {
1142 			dev->rx_desc[i].ctrl = 0;
1143 			dev_kfree_skb(dev->rx_skb[i]);
1144 			dev->rx_skb[i] = NULL;
1145 			dev->rx_desc[i].data_ptr = 0;
1146 		}
1147 
1148 	if (dev->rx_sg_skb) {
1149 		dev_kfree_skb(dev->rx_sg_skb);
1150 		dev->rx_sg_skb = NULL;
1151 	}
1152 }
1153 
/* Allocate and DMA-map a fresh skb for RX ring slot "slot", then hand
 * the BD to the hardware.  Returns 0 or -ENOMEM.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* The mapping starts 2 bytes before skb->data and the BD pointer
	 * is bumped by 2 to compensate (presumably so the IP header ends
	 * up 4-byte aligned — the same scheme is used throughout the RX
	 * path, see emac_recycle_rx_skb()).
	 */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* data_ptr/data_len must be visible before the BD goes EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1174 
1175 static void emac_print_link_status(struct emac_instance *dev)
1176 {
1177 	if (netif_carrier_ok(dev->ndev))
1178 		printk(KERN_INFO "%s: link is up, %d %s%s\n",
1179 		       dev->ndev->name, dev->phy.speed,
1180 		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1181 		       dev->phy.pause ? ", pause enabled" :
1182 		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1183 	else
1184 		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1185 }
1186 
/* Process ctx, rtnl_lock semaphore
 *
 * ndo_open hook: request the error IRQ, allocate the RX ring, start
 * PHY link polling (when a PHY is present), then configure and enable
 * the EMAC and MAL channels and the netif queues.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			/* Poll less often while the link is up */
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		/* Publish link_polling before the work can observe it */
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1260 
/* BHs disabled */
/* NOTE: compiled out (#if 0).  Kept for reference: decodes MR1 into
 * speed/duplex/pause and compares against the cached PHY state,
 * returning non-zero when they differ.
 */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1293 
/* Periodic PHY link poll (delayed work, scheduled from emac_open() and
 * rescheduled below while the device stays open).  A link-up
 * transition triggers a full TX reset; a link-down transition
 * reinitializes the MAC.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device closed while this work was pending: don't reschedule */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1334 
/* Force the link state to be re-evaluated soon: drop carrier and, if
 * PHY polling is active, cancel the pending poll and reschedule it
 * with the short link-down interval.  The second link_polling test
 * presumably guards against emac_close() clearing the flag while we
 * waited inside cancel_delayed_work_sync() — confirm against close().
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1345 
/* Process ctx, rtnl_lock semaphore
 *
 * ndo_stop hook: stop link polling, quiesce EMAC and MAL channels,
 * free all queued buffers and release the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Clear link_polling before cancelling so a concurrent
	 * emac_force_link_update() won't reschedule the work.
	 */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1377 
1378 static inline u16 emac_tx_csum(struct emac_instance *dev,
1379 			       struct sk_buff *skb)
1380 {
1381 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1382 		(skb->ip_summed == CHECKSUM_PARTIAL)) {
1383 		++dev->stats.tx_packets_csum;
1384 		return EMAC_TX_CTRL_TAH_CSUM;
1385 	}
1386 	return 0;
1387 }
1388 
1389 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1390 {
1391 	struct emac_regs __iomem *p = dev->emacp;
1392 	struct net_device *ndev = dev->ndev;
1393 
1394 	/* Send the packet out. If the if makes a significant perf
1395 	 * difference, then we can store the TMR0 value in "dev"
1396 	 * instead
1397 	 */
1398 	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1399 		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1400 	else
1401 		out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1402 
1403 	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1404 		netif_stop_queue(ndev);
1405 		DBG2(dev, "stopped TX queue" NL);
1406 	}
1407 
1408 	netif_trans_update(ndev);
1409 	++dev->stats.tx_packets;
1410 	dev->stats.tx_bytes += len;
1411 
1412 	return NETDEV_TX_OK;
1413 }
1414 
/* Tx lock BH
 *
 * ndo_start_xmit for non-scatter/gather EMACs: the whole skb is mapped
 * into a single TX BD.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	/* Base BD flags plus the optional TAH checksum-offload bit */
	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* data_ptr/data_len must be visible before READY is set in ctrl */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1443 
/* Write the buffer at DMA address "pd" ("len" bytes) into consecutive
 * TX BDs starting at the slot AFTER "slot", splitting it into chunks
 * of at most MAL_MAX_TX_SIZE.  base_ctrl carries the common BD flags;
 * the final chunk also gets MAL_TX_CTRL_LAST when "last" is set.
 * Bumps dev->tx_cnt once per BD and returns the last slot written.
 * The packet's head BD (at the caller's dev->tx_slot) is armed by the
 * caller afterwards.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached: the caller keeps ownership elsewhere */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1472 
/* Tx lock BH disabled (SG version for TAH equipped EMACs)
 *
 * ndo_start_xmit for scatter/gather capable EMACs: the linear part and
 * each fragment are split across TX BDs via emac_xmit_split().  If the
 * initial estimate of required BDs proves too optimistic, everything
 * written so far is undone and the queue is stopped (NETDEV_TX_BUSY).
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	/* len is now only the linear (head) portion */
	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		/* Re-check room for each fragment — see note above */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* Arm the head BD last: all other BDs are already filled in */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1556 
/* Tx lock BHs
 *
 * Decode a TX BD error status word into the per-cause error counters.
 */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
1584 
/* TX completion handler (MAL poll callback).  Walks the ring from
 * ack_slot, freeing skbs of BDs the hardware has released (READY bit
 * cleared), recording BD-level errors, and waking the queue once room
 * drops below the wakeup threshold.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped cores report a different set of error bits */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* SG packets only attach the skb to the last BD */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1630 
/* Return an RX skb to the hardware without reallocating it.  When
 * "len" is non-zero the (consumed) data region is re-mapped for DMA;
 * the same skb->data - 2 offset scheme as emac_alloc_rx_skb() applies.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* data_len must be visible before the BD is marked EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1647 
/* Decode an RX BD error status word into the per-cause error counters. */
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
1674 
/* Mark the skb's checksum as verified when the TAH is present and the
 * BD status (already masked with EMAC_BAD_RX_MASK by the caller) shows
 * no checksum error.  No-op without CONFIG_IBM_EMAC_TAH.
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1685 
/* Append the data of RX slot "slot" to the scatter/gather packet being
 * assembled in dev->rx_sg_skb.  Returns 0 on success, -1 when there is
 * no packet under assembly or the total would exceed the skb buffer
 * (the partial packet is then dropped).  The slot is recycled in all
 * cases.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1707 
/* NAPI poll context
 *
 * RX poll handler: process up to "budget" buffer descriptors starting
 * at dev->rx_slot.  Single-BD packets are either copied (small frames)
 * or replaced with a fresh skb; multi-BD packets are assembled via
 * dev->rx_sg_skb.  Also restarts the RX channel if it was stopped
 * (MAL_COMMAC_RX_STOPPED) and the budget wasn't exhausted.  Returns
 * the number of BDs consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Order the ctrl read above against the data_len read */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small frame: copy into a fresh skb and keep the ring
		 * buffer; otherwise give the ring buffer to the stack
		 * and allocate a replacement.
		 */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Multi-BD packet: FIRST starts the assembly, middle BDs
		 * are appended, LAST completes and pushes the packet.
		 */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped (e.g. descriptor error): drain anything
	 * left in the ring, drop a half-assembled packet, then restart.
	 */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1837 
1838 /* NAPI poll context */
1839 static int emac_peek_rx(void *param)
1840 {
1841 	struct emac_instance *dev = param;
1842 
1843 	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1844 }
1845 
1846 /* NAPI poll context */
1847 static int emac_peek_rx_sg(void *param)
1848 {
1849 	struct emac_instance *dev = param;
1850 
1851 	int slot = dev->rx_slot;
1852 	while (1) {
1853 		u16 ctrl = dev->rx_desc[slot].ctrl;
1854 		if (ctrl & MAL_RX_CTRL_EMPTY)
1855 			return 0;
1856 		else if (ctrl & MAL_RX_CTRL_LAST)
1857 			return 1;
1858 
1859 		slot = (slot + 1) % NUM_RX_BUFF;
1860 
1861 		/* I'm just being paranoid here :) */
1862 		if (unlikely(slot == dev->rx_slot))
1863 			return 0;
1864 	}
1865 }
1866 
/* Hard IRQ
 *
 * MAL RX descriptor error callback: count the event and request an
 * asynchronous RX disable; the channel is restarted later from the
 * RX poll path (see the MAL_COMMAC_RX_STOPPED handling there).
 */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1875 
/* Hard IRQ
 *
 * EMAC error interrupt handler: read and acknowledge ISR, then bump
 * the error counter matching each asserted status bit.  Only updates
 * statistics — no datapath work is done here.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Writing the value back clears the latched status bits */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1926 
/* ndo_get_stats hook: fold the driver's internal 64-bit counters into
 * the "legacy" net_device_stats structure.  dev->lock guards against
 * concurrent updates from the error IRQ path.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1979 
/* MAL callbacks for EMACs using single-BD RX (emac_peek_rx) */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1986 
/* MAL callbacks variant using the scatter/gather-aware RX peek */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1993 
/* Ethtool support */
/* ethtool get_link_ksettings: report the cached PHY capabilities and
 * the current link parameters; link_lock guards the fields the link
 * timer updates.
 */
static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
					   struct ethtool_link_ksettings *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = dev->phy.features;
	cmd->base.port = PORT_MII;
	cmd->base.phy_address = dev->phy.address;

	mutex_lock(&dev->link_lock);
	advertising = dev->phy.advertising;
	cmd->base.autoneg = dev->phy.autoneg;
	cmd->base.speed = dev->phy.speed;
	cmd->base.duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
2019 
/* ethtool set_link_ksettings: validate the request against the PHY's
 * capability mask, then either force speed/duplex or (re)start
 * autonegotiation with the requested advertisement, and finally kick
 * the link timer to pick up the new state.
 */
static int
emac_ethtool_set_link_ksettings(struct net_device *ndev,
				const struct ethtool_link_ksettings *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;
	if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the PHY must support the exact
		 * speed/duplex combination requested.
		 */
		switch (cmd->base.speed) {
		case SPEED_10:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->base.duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->base.duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
						cmd->base.duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		/* Advertise only supported modes; preserve the current
		 * pause advertisement bits.
		 */
		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
2096 
2097 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2098 				       struct ethtool_ringparam *rp)
2099 {
2100 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2101 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2102 }
2103 
2104 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2105 					struct ethtool_pauseparam *pp)
2106 {
2107 	struct emac_instance *dev = netdev_priv(ndev);
2108 
2109 	mutex_lock(&dev->link_lock);
2110 	if ((dev->phy.features & SUPPORTED_Autoneg) &&
2111 	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2112 		pp->autoneg = 1;
2113 
2114 	if (dev->phy.duplex == DUPLEX_FULL) {
2115 		if (dev->phy.pause)
2116 			pp->rx_pause = pp->tx_pause = 1;
2117 		else if (dev->phy.asym_pause)
2118 			pp->tx_pause = 1;
2119 	}
2120 	mutex_unlock(&dev->link_lock);
2121 }
2122 
2123 static int emac_get_regs_len(struct emac_instance *dev)
2124 {
2125 		return sizeof(struct emac_ethtool_regs_subhdr) +
2126 			sizeof(struct emac_regs);
2127 }
2128 
2129 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2130 {
2131 	struct emac_instance *dev = netdev_priv(ndev);
2132 	int size;
2133 
2134 	size = sizeof(struct emac_ethtool_regs_hdr) +
2135 		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2136 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2137 		size += zmii_get_regs_len(dev->zmii_dev);
2138 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2139 		size += rgmii_get_regs_len(dev->rgmii_dev);
2140 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2141 		size += tah_get_regs_len(dev->tah_dev);
2142 
2143 	return size;
2144 }
2145 
2146 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2147 {
2148 	struct emac_ethtool_regs_subhdr *hdr = buf;
2149 
2150 	hdr->index = dev->cell_index;
2151 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2152 		hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2153 	} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2154 		hdr->version = EMAC4_ETHTOOL_REGS_VER;
2155 	} else {
2156 		hdr->version = EMAC_ETHTOOL_REGS_VER;
2157 	}
2158 	memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2159 	return (void *)(hdr + 1) + sizeof(struct emac_regs);
2160 }
2161 
2162 static void emac_ethtool_get_regs(struct net_device *ndev,
2163 				  struct ethtool_regs *regs, void *buf)
2164 {
2165 	struct emac_instance *dev = netdev_priv(ndev);
2166 	struct emac_ethtool_regs_hdr *hdr = buf;
2167 
2168 	hdr->components = 0;
2169 	buf = hdr + 1;
2170 
2171 	buf = mal_dump_regs(dev->mal, buf);
2172 	buf = emac_dump_regs(dev, buf);
2173 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2174 		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2175 		buf = zmii_dump_regs(dev->zmii_dev, buf);
2176 	}
2177 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2178 		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2179 		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2180 	}
2181 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2182 		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2183 		buf = tah_dump_regs(dev->tah_dev, buf);
2184 	}
2185 }
2186 
/* ethtool -r: restart autonegotiation.
 *
 * Returns -EOPNOTSUPP on PHY-less configurations and -EINVAL when the
 * link is currently forced (autoneg disabled).  Note that
 * emac_force_link_update() is kicked unconditionally, even on the
 * -EINVAL path, so the link timer re-samples the current state.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
2209 
2210 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2211 {
2212 	if (stringset == ETH_SS_STATS)
2213 		return EMAC_ETHTOOL_STATS_COUNT;
2214 	else
2215 		return -EINVAL;
2216 }
2217 
2218 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2219 				     u8 * buf)
2220 {
2221 	if (stringset == ETH_SS_STATS)
2222 		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2223 }
2224 
/* Export driver statistics as a flat u64 array: dev->stats first,
 * immediately followed by dev->estats.
 *
 * NOTE(review): this assumes both structures consist purely of u64
 * counters laid out back to back with no padding — confirm against
 * their definitions in core.h before changing either struct.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* advance past the main counters before appending the extended ones */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2235 
/* ethtool -i: identify the driver and the device-tree node backing
 * this EMAC instance (cell index + full DT path as "bus info").
 */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
}
2246 
/* ethtool entry points for this driver.  Ring and pause parameters are
 * read-only; link settings go through the ksettings API above.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_link_ksettings = emac_ethtool_get_link_ksettings,
	.set_link_ksettings = emac_ethtool_set_link_ksettings,
};
2266 
/* Legacy MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Not available on PHY-less configurations (phy.address < 0).
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through: GMIIPHY also returns the register value */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2294 
/* One entry in the table of devices this EMAC depends on (MAL, ZMII,
 * RGMII, TAH, MDIO and the previous EMAC in the boot list).  Fields
 * are resolved incrementally by emac_check_deps().
 */
struct emac_depentry {
	u32			phandle;	/* DT phandle of the dependency, 0 if none */
	struct device_node	*node;		/* resolved DT node (refcounted) */
	struct platform_device	*ofdev;		/* matching platform device (refcounted) */
	void			*drvdata;	/* dependency's driver data once it has probed */
};
2301 
/* Indices into the emac_depentry table used by emac_check_deps() and
 * emac_wait_deps().  EMAC_DEP_PREV_IDX tracks the previous EMAC in the
 * boot list, which must probe before this one.
 */
#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
2309 
/* Try to resolve every dependency in @deps one more step (phandle ->
 * DT node -> platform device -> driver data).  Each entry keeps the
 * progress it has already made, so this is safe to call repeatedly
 * from the wait loop in emac_wait_deps().
 *
 * Returns non-zero once all EMAC_DEP_COUNT dependencies are satisfied.
 */
static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				/* previous EMAC failed/unregistered: drop the dep */
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2348 
/* Drop the device references taken over from emac_wait_deps().
 * of_dev_put(NULL) is a no-op, so absent dependencies are fine.
 */
static void emac_put_deps(struct emac_instance *dev)
{
	of_dev_put(dev->mal_dev);
	of_dev_put(dev->zmii_dev);
	of_dev_put(dev->rgmii_dev);
	of_dev_put(dev->mdio_dev);
	of_dev_put(dev->tah_dev);
}
2357 
/* Platform-bus notifier: wake waiters whenever any device binds to a
 * driver, so emac_wait_deps() re-checks its dependency table.
 */
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2366 
/* Registered on the platform bus only for the duration of emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
2370 
/* Wait (with timeout) until all devices this EMAC depends on have
 * probed, then hand their platform-device references over to @dev.
 *
 * On success the ofdev references are kept in dev->*_dev (released
 * later by emac_put_deps()); on timeout they are dropped here.  The
 * PREV entry only serves as an ordering barrier and its reference is
 * always released.  Node references taken by emac_check_deps() are
 * dropped unconditionally.
 */
static int emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* non-zero marker: the real node comes from the boot list */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		of_node_put(deps[i].node);
		if (err)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2408 
/* Read a u32 device-tree property @name from @np into @val.
 *
 * Returns 0 on success, -ENODEV if the property is missing or shorter
 * than one cell; logs an error only when @fatal is set.
 *
 * NOTE(review): dereferences the raw cell without be32 conversion —
 * assumes a big-endian CPU matching the DT encoding (true on the
 * PowerPC 4xx parts this driver targets); confirm before reusing.
 */
static int emac_read_uint_prop(struct device_node *np, const char *name,
			       u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
2423 
/* phylib adjust_link callback: mirror the phy_device's current state
 * into the driver's legacy mii_phy shadow structure so the rest of the
 * driver keeps working off dev->phy.
 */
static void emac_adjust_link(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct phy_device *phy = dev->phy_dev;

	dev->phy.autoneg = phy->autoneg;
	dev->phy.speed = phy->speed;
	dev->phy.duplex = phy->duplex;
	dev->phy.pause = phy->pause;
	dev->phy.asym_pause = phy->asym_pause;
	dev->phy.advertising = phy->advertising;
}
2436 
2437 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2438 {
2439 	int ret = emac_mdio_read(bus->priv, addr, regnum);
2440 	/* This is a workaround for powered down ports/phys.
2441 	 * In the wild, this was seen on the Cisco Meraki MX60(W).
2442 	 * This hardware disables ports as part of the handoff
2443 	 * procedure. Accessing the ports will lead to errors
2444 	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2445 	 */
2446 	return ret < 0 ? 0xffff : ret;
2447 }
2448 
/* MDIO bus write accessor; the underlying emac_mdio_write() does not
 * report errors, so this always returns success.
 */
static int emac_mii_bus_write(struct mii_bus *bus, int addr,
			      int regnum, u16 val)
{
	emac_mdio_write(bus->priv, addr, regnum, val);
	return 0;
}
2455 
/* MDIO bus reset hook: resetting the whole EMAC also resets its
 * embedded MDIO logic (bus->priv is the net_device).
 */
static int emac_mii_bus_reset(struct mii_bus *bus)
{
	struct emac_instance *dev = netdev_priv(bus->priv);

	return emac_reset(dev);
}
2462 
2463 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2464 {
2465 	struct net_device *ndev = phy->dev;
2466 	struct emac_instance *dev = netdev_priv(ndev);
2467 
2468 	dev->phy.autoneg = AUTONEG_ENABLE;
2469 	dev->phy.speed = SPEED_1000;
2470 	dev->phy.duplex = DUPLEX_FULL;
2471 	dev->phy.advertising = advertise;
2472 	phy->autoneg = AUTONEG_ENABLE;
2473 	phy->speed = dev->phy.speed;
2474 	phy->duplex = dev->phy.duplex;
2475 	phy->advertising = advertise;
2476 	return phy_start_aneg(dev->phy_dev);
2477 }
2478 
2479 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2480 {
2481 	struct net_device *ndev = phy->dev;
2482 	struct emac_instance *dev = netdev_priv(ndev);
2483 
2484 	dev->phy.autoneg =  AUTONEG_DISABLE;
2485 	dev->phy.speed = speed;
2486 	dev->phy.duplex = fd;
2487 	phy->autoneg = AUTONEG_DISABLE;
2488 	phy->speed = speed;
2489 	phy->duplex = fd;
2490 	return phy_start_aneg(dev->phy_dev);
2491 }
2492 
2493 static int emac_mdio_poll_link(struct mii_phy *phy)
2494 {
2495 	struct net_device *ndev = phy->dev;
2496 	struct emac_instance *dev = netdev_priv(ndev);
2497 	int res;
2498 
2499 	res = phy_read_status(dev->phy_dev);
2500 	if (res) {
2501 		dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
2502 		return ethtool_op_get_link(ndev);
2503 	}
2504 
2505 	return dev->phy_dev->link;
2506 }
2507 
/* Refresh the cached link parameters (speed/duplex/pause) from the
 * phylib device.  Returns 0 on success or the phy_read_status() error.
 */
static int emac_mdio_read_link(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = phy_read_status(dev->phy_dev);
	if (res)
		return res;

	dev->phy.speed = phy->speed;
	dev->phy.duplex = phy->duplex;
	dev->phy.pause = phy->pause;
	dev->phy.asym_pause = phy->asym_pause;
	return 0;
}
2524 
/* mii_phy init hook for DT-described PHYs: start the phylib state
 * machine, snapshot the PHY's initial state into the legacy shadow
 * structure, then (re)initialize the PHY hardware.
 */
static int emac_mdio_init_phy(struct mii_phy *phy)
{
	struct net_device *ndev = phy->dev;
	struct emac_instance *dev = netdev_priv(ndev);

	phy_start(dev->phy_dev);
	dev->phy.autoneg = phy->autoneg;
	dev->phy.speed = phy->speed;
	dev->phy.duplex = phy->duplex;
	dev->phy.advertising = phy->advertising;
	dev->phy.pause = phy->pause;
	dev->phy.asym_pause = phy->asym_pause;

	return phy_init_hw(dev->phy_dev);
}
2540 
/* Legacy mii_phy ops backed by phylib, used when the PHY comes from a
 * device-tree "phy-handle" instead of the old probe path.
 */
static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
	.init		= emac_mdio_init_phy,
	.setup_aneg	= emac_mdio_setup_aneg,
	.setup_forced	= emac_mdio_setup_forced,
	.poll_link	= emac_mdio_poll_link,
	.read_link	= emac_mdio_read_link,
};
2548 
/* Register an MDIO bus described by an "mdio" child node of this
 * EMAC's DT node.
 *
 * Returns 0 on success, -ENODEV when no (enabled) mdio node exists,
 * -ENOMEM on allocation failure, or the of_mdiobus_register() error.
 * The bus structure is devm-managed; the node reference is always
 * dropped before returning.
 */
static int emac_dt_mdio_probe(struct emac_instance *dev)
{
	struct device_node *mii_np;
	int res;

	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
	if (!mii_np) {
		dev_err(&dev->ofdev->dev, "no mdio definition found.");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		res = -ENODEV;
		goto put_node;
	}

	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
	if (!dev->mii_bus) {
		res = -ENOMEM;
		goto put_node;
	}

	/* bus->priv is the net_device; the accessors recover the instance */
	dev->mii_bus->priv = dev->ndev;
	dev->mii_bus->parent = dev->ndev->dev.parent;
	dev->mii_bus->name = "emac_mdio";
	dev->mii_bus->read = &emac_mii_bus_read;
	dev->mii_bus->write = &emac_mii_bus_write;
	dev->mii_bus->reset = &emac_mii_bus_reset;
	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
	res = of_mdiobus_register(dev->mii_bus, mii_np);
	if (res) {
		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
			dev->mii_bus->name, res);
	}

 put_node:
	of_node_put(mii_np);
	return res;
}
2588 
/* Connect to the PHY referenced by @phy_handle via phylib and build a
 * legacy mii_phy definition from the bound PHY driver so the rest of
 * the driver can stay on the dev->phy interface.
 *
 * NOTE(review): assumes dev->phy_dev->drv is non-NULL after
 * of_phy_connect() — i.e. a specific PHY driver bound; verify the
 * genphy case cannot reach here with a NULL drv.
 */
static int emac_dt_phy_connect(struct emac_instance *dev,
			       struct device_node *phy_handle)
{
	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
				    GFP_KERNEL);
	if (!dev->phy.def)
		return -ENOMEM;

	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
				      0, dev->phy_mode);
	if (!dev->phy_dev) {
		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
		return -ENODEV;
	}

	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
	dev->phy.def->name = dev->phy_dev->drv->name;
	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
	dev->phy.features = dev->phy_dev->supported;
	dev->phy.address = dev->phy_dev->mdio.addr;
	dev->phy.mode = dev->phy_dev->interface;
	return 0;
}
2613 
/* Probe for a device-tree described PHY via the "phy-handle" property.
 *
 * Return protocol: 1 means no phy-handle exists (the caller falls back
 * to the legacy probe), 0 means success, negative is a hard error.
 * The MDIO bus registered on the way is torn down if the PHY connect
 * step fails.
 */
static int emac_dt_phy_probe(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct device_node *phy_handle;
	int res = 1;

	phy_handle = of_parse_phandle(np, "phy-handle", 0);

	if (phy_handle) {
		res = emac_dt_mdio_probe(dev);
		if (!res) {
			res = emac_dt_phy_connect(dev, phy_handle);
			if (res)
				mdiobus_unregister(dev->mii_bus);
		}
	}

	/* of_node_put(NULL) is a no-op */
	of_node_put(phy_handle);
	return res;
}
2634 
/* Locate, probe and initialize the PHY for this EMAC.
 *
 * Handles, in order: PHY-less / fixed-link configurations, DT-described
 * PHYs (phy-handle, which jumps straight to init_phy), and the legacy
 * scan of the MDIO address space guided by phy-map/phy-address.
 * Returns 0 on success or a negative errno.
 */
static int emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration. */
	if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
	    of_phy_is_fixed_link(np)) {
		emac_reset(dev);

		/* PHY-less configuration. */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		if (of_phy_is_fixed_link(np)) {
			int res = emac_dt_mdio_probe(dev);

			if (!res) {
				res = of_phy_register_fixed_link(np);
				if (res)
					mdiobus_unregister(dev->mii_bus);
			}
			return res;
		}
		return 0;
	}

	/* busy_phy_map is global; serialize the address scan */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX*/
	/* NOTE(review): identical to the dcri_clrset() above; the repeat
	 * looks redundant (the operation is idempotent) — confirm whether
	 * it is required after emac_rx_clk_tx() before removing.
	 */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		int res = emac_dt_phy_probe(dev);

		switch (res) {
		case 1:
			/* No phy-handle property configured.
			 * Continue with the existing phy probe
			 * and setup code.
			 */
			break;

		case 0:
			mutex_unlock(&emac_phy_map_lock);
			goto init_phy;

		default:
			mutex_unlock(&emac_phy_map_lock);
			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
				res);
			return res;
		}
	}

	/* An explicit phy-address restricts the scan to that one address */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	/* Scan all 32 MDIO addresses not masked out by phy_map */
	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

 init_phy:
	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;
	dev->phy.features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2801 
/* Parse this EMAC's device-tree node into the emac_instance config:
 * MAL wiring, FIFO sizes, PHY addressing, bridge (TAH/ZMII/RGMII)
 * phandles, feature flags derived from compatible strings, the MAC
 * address and the IAHT/GAHT filter geometry.
 * Returns 0 on success or -ENXIO when a mandatory property is missing
 * or a configured bridge lacks compiled-in support.
 */
static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	/* Optional properties fall back to sane defaults */
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = ETH_DATA_LEN;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	/* The OPB frequency lives on the parent (bus) node */
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%s: Flow control not disabled!\n",
					np->full_name);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
2960 
/* netdev ops for non-gige EMACs: linear transmit, fixed MTU */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= emac_set_mac_address,
	.ndo_start_xmit		= emac_start_xmit,
};
2972 
/* netdev ops for gige-capable EMACs: scatter/gather transmit and a
 * changeable MTU are the only differences from emac_netdev_ops.
 */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= emac_set_mac_address,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
2985 
2986 static int emac_probe(struct platform_device *ofdev)
2987 {
2988 	struct net_device *ndev;
2989 	struct emac_instance *dev;
2990 	struct device_node *np = ofdev->dev.of_node;
2991 	struct device_node **blist = NULL;
2992 	int err, i;
2993 
2994 	/* Skip unused/unwired EMACS.  We leave the check for an unused
2995 	 * property here for now, but new flat device trees should set a
2996 	 * status property to "disabled" instead.
2997 	 */
2998 	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2999 		return -ENODEV;
3000 
3001 	/* Find ourselves in the bootlist if we are there */
3002 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3003 		if (emac_boot_list[i] == np)
3004 			blist = &emac_boot_list[i];
3005 
3006 	/* Allocate our net_device structure */
3007 	err = -ENOMEM;
3008 	ndev = alloc_etherdev(sizeof(struct emac_instance));
3009 	if (!ndev)
3010 		goto err_gone;
3011 
3012 	dev = netdev_priv(ndev);
3013 	dev->ndev = ndev;
3014 	dev->ofdev = ofdev;
3015 	dev->blist = blist;
3016 	SET_NETDEV_DEV(ndev, &ofdev->dev);
3017 
3018 	/* Initialize some embedded data structures */
3019 	mutex_init(&dev->mdio_lock);
3020 	mutex_init(&dev->link_lock);
3021 	spin_lock_init(&dev->lock);
3022 	INIT_WORK(&dev->reset_work, emac_reset_work);
3023 
3024 	/* Init various config data based on device-tree */
3025 	err = emac_init_config(dev);
3026 	if (err != 0)
3027 		goto err_free;
3028 
3029 	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
3030 	dev->emac_irq = irq_of_parse_and_map(np, 0);
3031 	dev->wol_irq = irq_of_parse_and_map(np, 1);
3032 	if (!dev->emac_irq) {
3033 		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
3034 		goto err_free;
3035 	}
3036 	ndev->irq = dev->emac_irq;
3037 
3038 	/* Map EMAC regs */
3039 	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
3040 		printk(KERN_ERR "%s: Can't get registers address\n",
3041 		       np->full_name);
3042 		goto err_irq_unmap;
3043 	}
3044 	// TODO : request_mem_region
3045 	dev->emacp = ioremap(dev->rsrc_regs.start,
3046 			     resource_size(&dev->rsrc_regs));
3047 	if (dev->emacp == NULL) {
3048 		printk(KERN_ERR "%s: Can't map device registers!\n",
3049 		       np->full_name);
3050 		err = -ENOMEM;
3051 		goto err_irq_unmap;
3052 	}
3053 
3054 	/* Wait for dependent devices */
3055 	err = emac_wait_deps(dev);
3056 	if (err) {
3057 		printk(KERN_ERR
3058 		       "%s: Timeout waiting for dependent devices\n",
3059 		       np->full_name);
3060 		/*  display more info about what's missing ? */
3061 		goto err_reg_unmap;
3062 	}
3063 	dev->mal = platform_get_drvdata(dev->mal_dev);
3064 	if (dev->mdio_dev != NULL)
3065 		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
3066 
3067 	/* Register with MAL */
3068 	dev->commac.ops = &emac_commac_ops;
3069 	dev->commac.dev = dev;
3070 	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3071 	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3072 	err = mal_register_commac(dev->mal, &dev->commac);
3073 	if (err) {
3074 		printk(KERN_ERR "%s: failed to register with mal %s!\n",
3075 		       np->full_name, dev->mal_dev->dev.of_node->full_name);
3076 		goto err_rel_deps;
3077 	}
3078 	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3079 	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3080 
3081 	/* Get pointers to BD rings */
3082 	dev->tx_desc =
3083 	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3084 	dev->rx_desc =
3085 	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3086 
3087 	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3088 	DBG(dev, "rx_desc %p" NL, dev->rx_desc);
3089 
3090 	/* Clean rings */
3091 	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3092 	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
3093 	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3094 	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
3095 
3096 	/* Attach to ZMII, if needed */
3097 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3098 	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3099 		goto err_unreg_commac;
3100 
3101 	/* Attach to RGMII, if needed */
3102 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3103 	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3104 		goto err_detach_zmii;
3105 
3106 	/* Attach to TAH, if needed */
3107 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3108 	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3109 		goto err_detach_rgmii;
3110 
3111 	/* Set some link defaults before we can find out real parameters */
3112 	dev->phy.speed = SPEED_100;
3113 	dev->phy.duplex = DUPLEX_FULL;
3114 	dev->phy.autoneg = AUTONEG_DISABLE;
3115 	dev->phy.pause = dev->phy.asym_pause = 0;
3116 	dev->stop_timeout = STOP_TIMEOUT_100;
3117 	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3118 
3119 	/* Some SoCs like APM821xx does not support Half Duplex mode. */
3120 	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3121 		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3122 				     SUPPORTED_100baseT_Half |
3123 				     SUPPORTED_10baseT_Half);
3124 	}
3125 
3126 	/* Find PHY if any */
3127 	err = emac_init_phy(dev);
3128 	if (err != 0)
3129 		goto err_detach_tah;
3130 
3131 	if (dev->tah_dev) {
3132 		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3133 		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3134 	}
3135 	ndev->watchdog_timeo = 5 * HZ;
3136 	if (emac_phy_supports_gige(dev->phy_mode)) {
3137 		ndev->netdev_ops = &emac_gige_netdev_ops;
3138 		dev->commac.ops = &emac_commac_sg_ops;
3139 	} else
3140 		ndev->netdev_ops = &emac_netdev_ops;
3141 	ndev->ethtool_ops = &emac_ethtool_ops;
3142 
3143 	/* MTU range: 46 - 1500 or whatever is in OF */
3144 	ndev->min_mtu = EMAC_MIN_MTU;
3145 	ndev->max_mtu = dev->max_mtu;
3146 
3147 	netif_carrier_off(ndev);
3148 
3149 	err = register_netdev(ndev);
3150 	if (err) {
3151 		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
3152 		       np->full_name, err);
3153 		goto err_detach_tah;
3154 	}
3155 
3156 	/* Set our drvdata last as we don't want them visible until we are
3157 	 * fully initialized
3158 	 */
3159 	wmb();
3160 	platform_set_drvdata(ofdev, dev);
3161 
3162 	/* There's a new kid in town ! Let's tell everybody */
3163 	wake_up_all(&emac_probe_wait);
3164 
3165 
3166 	printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
3167 	       ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
3168 
3169 	if (dev->phy_mode == PHY_MODE_SGMII)
3170 		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3171 
3172 	if (dev->phy.address >= 0)
3173 		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
3174 		       dev->phy.def->name, dev->phy.address);
3175 
3176 	emac_dbg_register(dev);
3177 
3178 	/* Life is good */
3179 	return 0;
3180 
3181 	/* I have a bad feeling about this ... */
3182 
3183  err_detach_tah:
3184 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3185 		tah_detach(dev->tah_dev, dev->tah_port);
3186  err_detach_rgmii:
3187 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3188 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3189  err_detach_zmii:
3190 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3191 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3192  err_unreg_commac:
3193 	mal_unregister_commac(dev->mal, &dev->commac);
3194  err_rel_deps:
3195 	emac_put_deps(dev);
3196  err_reg_unmap:
3197 	iounmap(dev->emacp);
3198  err_irq_unmap:
3199 	if (dev->wol_irq)
3200 		irq_dispose_mapping(dev->wol_irq);
3201 	if (dev->emac_irq)
3202 		irq_dispose_mapping(dev->emac_irq);
3203  err_free:
3204 	free_netdev(ndev);
3205  err_gone:
3206 	/* if we were on the bootlist, remove us as we won't show up and
3207 	 * wake up all waiters to notify them in case they were waiting
3208 	 * on us
3209 	 */
3210 	if (blist) {
3211 		*blist = NULL;
3212 		wake_up_all(&emac_probe_wait);
3213 	}
3214 	return err;
3215 }
3216 
/*
 * Tear down one EMAC instance, undoing emac_probe() in reverse order.
 * Called by the driver core on unbind/module removal; always returns 0.
 */
static int emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = platform_get_drvdata(ofdev);

	DBG(dev, "remove" NL);

	/* Unregister first so no new traffic or ndo callbacks arrive */
	unregister_netdev(dev->ndev);

	/* Make sure a pending reset worker isn't still touching the device */
	cancel_work_sync(&dev->reset_work);

	/* Detach from the optional bridge devices we attached to in probe */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	if (dev->phy_dev)
		phy_disconnect(dev->phy_dev);

	if (dev->mii_bus)
		mdiobus_unregister(dev->mii_bus);

	/* Release our claim on the PHY address so it can be reused */
	busy_phy_map &= ~(1 << dev->phy.address);
	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	/* irq 0 means "never mapped" for the optional WOL interrupt */
	if (dev->wol_irq)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
3258 
3259 /* XXX Features in here should be replaced by properties... */
/* Device-tree match table: the three EMAC hardware generations this
 * driver supports.  The "network" type check is kept for old device
 * trees that relied on it.
 */
static const struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
3277 
/* Platform driver glue binding emac_probe/emac_remove to the OF match table */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3286 
3287 static void __init emac_make_bootlist(void)
3288 {
3289 	struct device_node *np = NULL;
3290 	int j, max, i = 0;
3291 	int cell_indices[EMAC_BOOT_LIST_SIZE];
3292 
3293 	/* Collect EMACs */
3294 	while((np = of_find_all_nodes(np)) != NULL) {
3295 		const u32 *idx;
3296 
3297 		if (of_match_node(emac_match, np) == NULL)
3298 			continue;
3299 		if (of_get_property(np, "unused", NULL))
3300 			continue;
3301 		idx = of_get_property(np, "cell-index", NULL);
3302 		if (idx == NULL)
3303 			continue;
3304 		cell_indices[i] = *idx;
3305 		emac_boot_list[i++] = of_node_get(np);
3306 		if (i >= EMAC_BOOT_LIST_SIZE) {
3307 			of_node_put(np);
3308 			break;
3309 		}
3310 	}
3311 	max = i;
3312 
3313 	/* Bubble sort them (doh, what a creative algorithm :-) */
3314 	for (i = 0; max > 1 && (i < (max - 1)); i++)
3315 		for (j = i; j < max; j++) {
3316 			if (cell_indices[i] > cell_indices[j]) {
3317 				swap(emac_boot_list[i], emac_boot_list[j]);
3318 				swap(cell_indices[i], cell_indices[j]);
3319 			}
3320 		}
3321 }
3322 
3323 static int __init emac_init(void)
3324 {
3325 	int rc;
3326 
3327 	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3328 
3329 	/* Init debug stuff */
3330 	emac_init_debug();
3331 
3332 	/* Build EMAC boot list */
3333 	emac_make_bootlist();
3334 
3335 	/* Init submodules */
3336 	rc = mal_init();
3337 	if (rc)
3338 		goto err;
3339 	rc = zmii_init();
3340 	if (rc)
3341 		goto err_mal;
3342 	rc = rgmii_init();
3343 	if (rc)
3344 		goto err_zmii;
3345 	rc = tah_init();
3346 	if (rc)
3347 		goto err_rgmii;
3348 	rc = platform_driver_register(&emac_driver);
3349 	if (rc)
3350 		goto err_tah;
3351 
3352 	return 0;
3353 
3354  err_tah:
3355 	tah_exit();
3356  err_rgmii:
3357 	rgmii_exit();
3358  err_zmii:
3359 	zmii_exit();
3360  err_mal:
3361 	mal_exit();
3362  err:
3363 	return rc;
3364 }
3365 
3366 static void __exit emac_exit(void)
3367 {
3368 	int i;
3369 
3370 	platform_driver_unregister(&emac_driver);
3371 
3372 	tah_exit();
3373 	rgmii_exit();
3374 	zmii_exit();
3375 	mal_exit();
3376 	emac_fini_debug();
3377 
3378 	/* Destroy EMAC boot list */
3379 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3380 		of_node_put(emac_boot_list[i]);
3381 }
3382 
3383 module_init(emac_init);
3384 module_exit(emac_exit);
3385