1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Alchemy Au1x00 ethernet driver
5  *
6  * Copyright 2001-2003, 2006 MontaVista Software Inc.
7  * Copyright 2002 TimeSys Corp.
8  * Added ethtool/mii-tool support,
9  * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
10  * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
11  * or riemer@riemer-nt.de: fixed the link beat detection with
12  * ioctls (SIOCGMIIPHY)
13  * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
14  *  converted to use linux-2.6.x's PHY framework
15  *
16  * Author: MontaVista Software, Inc.
17  *		ppopov@mvista.com or source@mvista.com
18  */
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/capability.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/timer.h>
27 #include <linux/errno.h>
28 #include <linux/in.h>
29 #include <linux/ioport.h>
30 #include <linux/bitops.h>
31 #include <linux/slab.h>
32 #include <linux/interrupt.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/mii.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/crc32.h>
40 #include <linux/phy.h>
41 #include <linux/platform_device.h>
42 #include <linux/cpu.h>
43 #include <linux/io.h>
44 
45 #include <asm/mipsregs.h>
46 #include <asm/irq.h>
47 #include <asm/processor.h>
48 
49 #include <au1000.h>
50 #include <au1xxx_eth.h>
51 #include <prom.h>
52 
53 #include "au1000_eth.h"
54 
55 #ifdef AU1000_ETH_DEBUG
56 static int au1000_debug = 5;
57 #else
58 static int au1000_debug = 3;
59 #endif
60 
61 #define AU1000_DEF_MSG_ENABLE	(NETIF_MSG_DRV	| \
62 				NETIF_MSG_PROBE	| \
63 				NETIF_MSG_LINK)
64 
65 #define DRV_NAME	"au1000_eth"
66 #define DRV_VERSION	"1.7"
67 #define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
68 #define DRV_DESC	"Au1xxx on-chip Ethernet driver"
69 
70 MODULE_AUTHOR(DRV_AUTHOR);
71 MODULE_DESCRIPTION(DRV_DESC);
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_VERSION);
74 
75 /* AU1000 MAC registers and bits */
76 #define MAC_CONTROL		0x0
77 #  define MAC_RX_ENABLE		(1 << 2)
78 #  define MAC_TX_ENABLE		(1 << 3)
79 #  define MAC_DEF_CHECK		(1 << 5)
80 #  define MAC_SET_BL(X)		(((X) & 0x3) << 6)
81 #  define MAC_AUTO_PAD		(1 << 8)
82 #  define MAC_DISABLE_RETRY	(1 << 10)
83 #  define MAC_DISABLE_BCAST	(1 << 11)
84 #  define MAC_LATE_COL		(1 << 12)
85 #  define MAC_HASH_MODE		(1 << 13)
86 #  define MAC_HASH_ONLY		(1 << 15)
87 #  define MAC_PASS_ALL		(1 << 16)
88 #  define MAC_INVERSE_FILTER	(1 << 17)
89 #  define MAC_PROMISCUOUS	(1 << 18)
90 #  define MAC_PASS_ALL_MULTI	(1 << 19)
91 #  define MAC_FULL_DUPLEX	(1 << 20)
92 #  define MAC_NORMAL_MODE	0
93 #  define MAC_INT_LOOPBACK	(1 << 21)
94 #  define MAC_EXT_LOOPBACK	(1 << 22)
95 #  define MAC_DISABLE_RX_OWN	(1 << 23)
96 #  define MAC_BIG_ENDIAN	(1 << 30)
97 #  define MAC_RX_ALL		(1 << 31)
98 #define MAC_ADDRESS_HIGH	0x4
99 #define MAC_ADDRESS_LOW		0x8
100 #define MAC_MCAST_HIGH		0xC
101 #define MAC_MCAST_LOW		0x10
102 #define MAC_MII_CNTRL		0x14
103 #  define MAC_MII_BUSY		(1 << 0)
104 #  define MAC_MII_READ		0
105 #  define MAC_MII_WRITE		(1 << 1)
106 #  define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6)
107 #  define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11)
108 #define MAC_MII_DATA		0x18
109 #define MAC_FLOW_CNTRL		0x1C
110 #  define MAC_FLOW_CNTRL_BUSY	(1 << 0)
111 #  define MAC_FLOW_CNTRL_ENABLE (1 << 1)
112 #  define MAC_PASS_CONTROL	(1 << 2)
113 #  define MAC_SET_PAUSE(X)	(((X) & 0xffff) << 16)
114 #define MAC_VLAN1_TAG		0x20
115 #define MAC_VLAN2_TAG		0x24
116 
117 /* Ethernet Controller Enable */
118 #  define MAC_EN_CLOCK_ENABLE	(1 << 0)
119 #  define MAC_EN_RESET0		(1 << 1)
120 #  define MAC_EN_TOSS		(0 << 2)
121 #  define MAC_EN_CACHEABLE	(1 << 3)
122 #  define MAC_EN_RESET1		(1 << 4)
123 #  define MAC_EN_RESET2		(1 << 5)
124 #  define MAC_DMA_RESET		(1 << 6)
125 
126 /* Ethernet Controller DMA Channels */
127 /* offsets from MAC_TX_RING_ADDR address */
128 #define MAC_TX_BUFF0_STATUS	0x0
129 #  define TX_FRAME_ABORTED	(1 << 0)
130 #  define TX_JAB_TIMEOUT	(1 << 1)
131 #  define TX_NO_CARRIER		(1 << 2)
132 #  define TX_LOSS_CARRIER	(1 << 3)
133 #  define TX_EXC_DEF		(1 << 4)
134 #  define TX_LATE_COLL_ABORT	(1 << 5)
135 #  define TX_EXC_COLL		(1 << 6)
136 #  define TX_UNDERRUN		(1 << 7)
137 #  define TX_DEFERRED		(1 << 8)
138 #  define TX_LATE_COLL		(1 << 9)
139 #  define TX_COLL_CNT_MASK	(0xF << 10)
140 #  define TX_PKT_RETRY		(1 << 31)
141 #define MAC_TX_BUFF0_ADDR	0x4
142 #  define TX_DMA_ENABLE		(1 << 0)
143 #  define TX_T_DONE		(1 << 1)
144 #  define TX_GET_DMA_BUFFER(X)	(((X) >> 2) & 0x3)
145 #define MAC_TX_BUFF0_LEN	0x8
146 #define MAC_TX_BUFF1_STATUS	0x10
147 #define MAC_TX_BUFF1_ADDR	0x14
148 #define MAC_TX_BUFF1_LEN	0x18
149 #define MAC_TX_BUFF2_STATUS	0x20
150 #define MAC_TX_BUFF2_ADDR	0x24
151 #define MAC_TX_BUFF2_LEN	0x28
152 #define MAC_TX_BUFF3_STATUS	0x30
153 #define MAC_TX_BUFF3_ADDR	0x34
154 #define MAC_TX_BUFF3_LEN	0x38
155 
156 /* offsets from MAC_RX_RING_ADDR */
157 #define MAC_RX_BUFF0_STATUS	0x0
158 #  define RX_FRAME_LEN_MASK	0x3fff
159 #  define RX_WDOG_TIMER		(1 << 14)
160 #  define RX_RUNT		(1 << 15)
161 #  define RX_OVERLEN		(1 << 16)
162 #  define RX_COLL		(1 << 17)
163 #  define RX_ETHER		(1 << 18)
164 #  define RX_MII_ERROR		(1 << 19)
165 #  define RX_DRIBBLING		(1 << 20)
166 #  define RX_CRC_ERROR		(1 << 21)
167 #  define RX_VLAN1		(1 << 22)
168 #  define RX_VLAN2		(1 << 23)
169 #  define RX_LEN_ERROR		(1 << 24)
170 #  define RX_CNTRL_FRAME	(1 << 25)
171 #  define RX_U_CNTRL_FRAME	(1 << 26)
172 #  define RX_MCAST_FRAME	(1 << 27)
173 #  define RX_BCAST_FRAME	(1 << 28)
174 #  define RX_FILTER_FAIL	(1 << 29)
175 #  define RX_PACKET_FILTER	(1 << 30)
176 #  define RX_MISSED_FRAME	(1 << 31)
177 
178 #  define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN |  \
179 		    RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
180 		    RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
181 #define MAC_RX_BUFF0_ADDR	0x4
182 #  define RX_DMA_ENABLE		(1 << 0)
183 #  define RX_T_DONE		(1 << 1)
184 #  define RX_GET_DMA_BUFFER(X)	(((X) >> 2) & 0x3)
185 #  define RX_SET_BUFF_ADDR(X)	((X) & 0xffffffc0)
186 #define MAC_RX_BUFF1_STATUS	0x10
187 #define MAC_RX_BUFF1_ADDR	0x14
188 #define MAC_RX_BUFF2_STATUS	0x20
189 #define MAC_RX_BUFF2_ADDR	0x24
190 #define MAC_RX_BUFF3_STATUS	0x30
191 #define MAC_RX_BUFF3_ADDR	0x34
192 
193 /*
194  * Theory of operation
195  *
196  * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
197  * There are four receive and four transmit descriptors.  These
198  * descriptors are not in memory; rather, they are just a set of
199  * hardware registers.
200  *
201  * Since the Au1000 has a coherent data cache, the receive and
202  * transmit buffers are allocated from the KSEG0 segment. The
203  * hardware registers, however, are still mapped at KSEG1 to
 * make sure there are no out-of-order writes and that all writes
205  * complete immediately.
206  */
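
/*
 * As an illustration only (this mirrors au1000_setup_hw_rings() below),
 * the "descriptor" addresses are derived from the MACDMA base that is
 * handed in through the third platform MEM resource in au1000_probe():
 *
 *	tx descriptor i:  macdma base + 0x000 + i * sizeof(struct tx_dma)
 *	rx descriptor i:  macdma base + 0x100 + i * sizeof(struct rx_dma)
 *
 * i.e. the MAC_TX_BUFFn_* / MAC_RX_BUFFn_* register offsets defined above.
 */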
207 
208 /*
209  * board-specific configurations
210  *
211  * PHY detection algorithm
212  *
213  * If phy_static_config is undefined, the PHY setup is
214  * autodetected:
215  *
216  * mii_probe() first searches the current MAC's MII bus for a PHY,
217  * selecting the first (or last, if phy_search_highest_addr is
218  * defined) PHY address not already claimed by another netdev.
219  *
 * If nothing is found that way while searching for the 2nd ethernet
 * controller's PHY and phy1_search_mac0 is defined, the first MII bus
 * is searched as well for an unclaimed PHY; this is needed for boards
 * with a dual PHY that is accessible only through MAC0's MII bus.
 *
 * Finally, if no PHY is found, the corresponding ethernet controller
 * is not registered with the network subsystem.
228  */
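
/*
 * In pseudocode (see au1000_mii_probe() below for the real thing):
 *
 *	phydev = first (or highest-addressed) PHY on this MAC's bus
 *	if (!phydev && mac_id == 1 && phy1_search_mac0)
 *		phydev = first unclaimed PHY reachable via MAC0
 *	if (!phydev)
 *		fail the probe for this MAC
 */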
229 
230 /* autodetection defaults: phy1_search_mac0 */
231 
/* static PHY setup
 *
 * Most boards' PHY setup should be properly detectable with the
 * autodetection algorithm in mii_probe(), but in some cases (e.g. if
 * you have a switch attached, or want to use the PHY's interrupt
 * notification capabilities) you can provide a static PHY
 * configuration here.
 *
 * An IRQ may only be set if a PHY address was configured; if a PHY
 * address is given, a bus id must also be set.
 *
 * PS: make sure the IRQs used are configured properly in the
 * board-specific irq map.
 */
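
/*
 * For illustration only, a minimal sketch of such a static setup as it
 * could look in board code (the IRQ number is hypothetical; the fields
 * shown are exactly the ones consumed from platform_data in
 * au1000_probe() below):
 *
 *	static struct au1000_eth_platform_data board_eth0_pdata = {
 *		.phy_static_config	 = 1,
 *		.phy_search_highest_addr = 0,
 *		.phy1_search_mac0	 = 0,
 *		.phy_addr		 = 1,
 *		.phy_busid		 = 0,
 *		.phy_irq		 = BOARD_PHY_IRQ,
 *	};
 *
 * which the board then attaches as dev.platform_data of its
 * "au1000-eth" platform device.
 */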
246 
247 static void au1000_enable_mac(struct net_device *dev, int force_reset)
248 {
249 	unsigned long flags;
250 	struct au1000_private *aup = netdev_priv(dev);
251 
252 	spin_lock_irqsave(&aup->lock, flags);
253 
254 	if (force_reset || (!aup->mac_enabled)) {
255 		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
256 		wmb(); /* drain writebuffer */
257 		mdelay(2);
258 		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
259 				| MAC_EN_CLOCK_ENABLE), aup->enable);
260 		wmb(); /* drain writebuffer */
261 		mdelay(2);
262 
263 		aup->mac_enabled = 1;
264 	}
265 
266 	spin_unlock_irqrestore(&aup->lock, flags);
267 }
268 
269 /*
270  * MII operations
271  */
272 static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
273 {
274 	struct au1000_private *aup = netdev_priv(dev);
275 	u32 *const mii_control_reg = &aup->mac->mii_control;
276 	u32 *const mii_data_reg = &aup->mac->mii_data;
277 	u32 timedout = 20;
278 	u32 mii_control;
279 
280 	while (readl(mii_control_reg) & MAC_MII_BUSY) {
281 		mdelay(1);
282 		if (--timedout == 0) {
			netdev_err(dev, "mdio_read busy timeout!!\n");
284 			return -1;
285 		}
286 	}
287 
288 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
289 		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
290 
291 	writel(mii_control, mii_control_reg);
292 
293 	timedout = 20;
294 	while (readl(mii_control_reg) & MAC_MII_BUSY) {
295 		mdelay(1);
296 		if (--timedout == 0) {
297 			netdev_err(dev, "mdio_read busy timeout!!\n");
298 			return -1;
299 		}
300 	}
301 	return readl(mii_data_reg);
302 }
303 
304 static void au1000_mdio_write(struct net_device *dev, int phy_addr,
305 			      int reg, u16 value)
306 {
307 	struct au1000_private *aup = netdev_priv(dev);
308 	u32 *const mii_control_reg = &aup->mac->mii_control;
309 	u32 *const mii_data_reg = &aup->mac->mii_data;
310 	u32 timedout = 20;
311 	u32 mii_control;
312 
313 	while (readl(mii_control_reg) & MAC_MII_BUSY) {
314 		mdelay(1);
315 		if (--timedout == 0) {
316 			netdev_err(dev, "mdio_write busy timeout!!\n");
317 			return;
318 		}
319 	}
320 
321 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
322 		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
323 
324 	writel(value, mii_data_reg);
325 	writel(mii_control, mii_control_reg);
326 }
327 
328 static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
329 {
330 	struct net_device *const dev = bus->priv;
331 
332 	/* make sure the MAC associated with this
333 	 * mii_bus is enabled
334 	 */
335 	au1000_enable_mac(dev, 0);
336 
337 	return au1000_mdio_read(dev, phy_addr, regnum);
338 }
339 
340 static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
341 				u16 value)
342 {
343 	struct net_device *const dev = bus->priv;
344 
345 	/* make sure the MAC associated with this
346 	 * mii_bus is enabled
347 	 */
348 	au1000_enable_mac(dev, 0);
349 
350 	au1000_mdio_write(dev, phy_addr, regnum, value);
351 	return 0;
352 }
353 
354 static int au1000_mdiobus_reset(struct mii_bus *bus)
355 {
356 	struct net_device *const dev = bus->priv;
357 
358 	/* make sure the MAC associated with this
359 	 * mii_bus is enabled
360 	 */
361 	au1000_enable_mac(dev, 0);
362 
363 	return 0;
364 }
365 
366 static void au1000_hard_stop(struct net_device *dev)
367 {
368 	struct au1000_private *aup = netdev_priv(dev);
369 	u32 reg;
370 
371 	netif_dbg(aup, drv, dev, "hard stop\n");
372 
373 	reg = readl(&aup->mac->control);
374 	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
375 	writel(reg, &aup->mac->control);
376 	wmb(); /* drain writebuffer */
377 	mdelay(10);
378 }
379 
380 static void au1000_enable_rx_tx(struct net_device *dev)
381 {
382 	struct au1000_private *aup = netdev_priv(dev);
383 	u32 reg;
384 
385 	netif_dbg(aup, hw, dev, "enable_rx_tx\n");
386 
387 	reg = readl(&aup->mac->control);
388 	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
389 	writel(reg, &aup->mac->control);
390 	wmb(); /* drain writebuffer */
391 	mdelay(10);
392 }
393 
394 static void
395 au1000_adjust_link(struct net_device *dev)
396 {
397 	struct au1000_private *aup = netdev_priv(dev);
398 	struct phy_device *phydev = dev->phydev;
399 	unsigned long flags;
400 	u32 reg;
401 
402 	int status_change = 0;
403 
404 	BUG_ON(!phydev);
405 
406 	spin_lock_irqsave(&aup->lock, flags);
407 
408 	if (phydev->link && (aup->old_speed != phydev->speed)) {
409 		/* speed changed */
410 
411 		switch (phydev->speed) {
412 		case SPEED_10:
413 		case SPEED_100:
414 			break;
415 		default:
			netdev_warn(dev, "Speed (%d) is not 10/100\n",
				    phydev->speed);
418 			break;
419 		}
420 
421 		aup->old_speed = phydev->speed;
422 
423 		status_change = 1;
424 	}
425 
426 	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
427 		/* duplex mode changed */
428 
429 		/* switching duplex mode requires to disable rx and tx! */
430 		au1000_hard_stop(dev);
431 
432 		reg = readl(&aup->mac->control);
433 		if (DUPLEX_FULL == phydev->duplex) {
434 			reg |= MAC_FULL_DUPLEX;
435 			reg &= ~MAC_DISABLE_RX_OWN;
436 		} else {
437 			reg &= ~MAC_FULL_DUPLEX;
438 			reg |= MAC_DISABLE_RX_OWN;
439 		}
440 		writel(reg, &aup->mac->control);
441 		wmb(); /* drain writebuffer */
442 		mdelay(1);
443 
444 		au1000_enable_rx_tx(dev);
445 		aup->old_duplex = phydev->duplex;
446 
447 		status_change = 1;
448 	}
449 
450 	if (phydev->link != aup->old_link) {
451 		/* link state changed */
452 
453 		if (!phydev->link) {
454 			/* link went down */
455 			aup->old_speed = 0;
456 			aup->old_duplex = -1;
457 		}
458 
459 		aup->old_link = phydev->link;
460 		status_change = 1;
461 	}
462 
463 	spin_unlock_irqrestore(&aup->lock, flags);
464 
465 	if (status_change) {
466 		if (phydev->link)
467 			netdev_info(dev, "link up (%d/%s)\n",
468 			       phydev->speed,
469 			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
470 		else
471 			netdev_info(dev, "link down\n");
472 	}
473 }
474 
475 static int au1000_mii_probe(struct net_device *dev)
476 {
477 	struct au1000_private *const aup = netdev_priv(dev);
478 	struct phy_device *phydev = NULL;
479 	int phy_addr;
480 
481 	if (aup->phy_static_config) {
482 		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
483 
484 		if (aup->phy_addr)
485 			phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
486 		else
487 			netdev_info(dev, "using PHY-less setup\n");
488 		return 0;
489 	}
490 
491 	/* find the first (lowest address) PHY
492 	 * on the current MAC's MII bus
493 	 */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		struct phy_device *const tmp_phydev =
			mdiobus_get_phy(aup->mii_bus, phy_addr);

		if (!tmp_phydev)
			continue;

		phydev = tmp_phydev;
		if (!aup->phy_search_highest_addr)
			/* break out with first one found */
			break;
	}
501 
502 	if (aup->phy1_search_mac0) {
503 		/* try harder to find a PHY */
504 		if (!phydev && (aup->mac_id == 1)) {
505 			/* no PHY found, maybe we have a dual PHY? */
			dev_info(&dev->dev,
				 "no PHY found on MAC1, let's see if it's attached to MAC0...\n");
508 
509 			/* find the first (lowest address) non-attached
510 			 * PHY on the MAC0 MII bus
511 			 */
512 			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
513 				struct phy_device *const tmp_phydev =
514 					mdiobus_get_phy(aup->mii_bus,
515 							phy_addr);
516 
517 				if (aup->mac_id == 1)
518 					break;
519 
520 				/* no PHY here... */
521 				if (!tmp_phydev)
522 					continue;
523 
524 				/* already claimed by MAC0 */
525 				if (tmp_phydev->attached_dev)
526 					continue;
527 
528 				phydev = tmp_phydev;
529 				break; /* found it */
530 			}
531 		}
532 	}
533 
534 	if (!phydev) {
535 		netdev_err(dev, "no PHY found\n");
536 		return -1;
537 	}
538 
	/* now we should have a proper phydev to attach to... */
540 	BUG_ON(phydev->attached_dev);
541 
542 	phydev = phy_connect(dev, phydev_name(phydev),
543 			     &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
544 
545 	if (IS_ERR(phydev)) {
546 		netdev_err(dev, "Could not attach to PHY\n");
547 		return PTR_ERR(phydev);
548 	}
549 
550 	phy_set_max_speed(phydev, SPEED_100);
551 
552 	aup->old_link = 0;
553 	aup->old_speed = 0;
554 	aup->old_duplex = -1;
555 
556 	phy_attached_info(phydev);
557 
558 	return 0;
559 }
560 
561 
562 /*
563  * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and DMA address of a buffer suitable for
 * both receive and transmit operations.
566  */
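/*
 * The db_dest entries form a simple singly linked free list (threaded
 * through ->pnext); it is seeded with all NUM_TX_BUFFS + NUM_RX_BUFFS
 * entries in au1000_probe().
 */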
567 static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
568 {
	struct db_dest *pDB;

	pDB = aup->pDBfree;
571 
572 	if (pDB)
573 		aup->pDBfree = pDB->pnext;
574 
575 	return pDB;
576 }
577 
578 void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
579 {
	struct db_dest *pDBfree = aup->pDBfree;

	if (pDBfree)
582 		pDBfree->pnext = pDB;
583 	aup->pDBfree = pDB;
584 }
585 
586 static void au1000_reset_mac_unlocked(struct net_device *dev)
587 {
588 	struct au1000_private *const aup = netdev_priv(dev);
589 	int i;
590 
591 	au1000_hard_stop(dev);
592 
593 	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
594 	wmb(); /* drain writebuffer */
595 	mdelay(2);
596 	writel(0, aup->enable);
597 	wmb(); /* drain writebuffer */
598 	mdelay(2);
599 
600 	aup->tx_full = 0;
601 	for (i = 0; i < NUM_RX_DMA; i++) {
602 		/* reset control bits */
603 		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
604 	}
605 	for (i = 0; i < NUM_TX_DMA; i++) {
606 		/* reset control bits */
607 		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
608 	}
609 
	aup->mac_enabled = 0;
}
613 
614 static void au1000_reset_mac(struct net_device *dev)
615 {
616 	struct au1000_private *const aup = netdev_priv(dev);
617 	unsigned long flags;
618 
	netif_dbg(aup, hw, dev, "reset mac, aup %p\n", aup);
621 
622 	spin_lock_irqsave(&aup->lock, flags);
623 
624 	au1000_reset_mac_unlocked(dev);
625 
626 	spin_unlock_irqrestore(&aup->lock, flags);
627 }
628 
629 /*
630  * Setup the receive and transmit "rings".  These pointers are the addresses
631  * of the rx and tx MAC DMA registers so they are fixed by the hardware --
632  * these are not descriptors sitting in memory.
633  */
634 static void
635 au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
636 {
637 	int i;
638 
639 	for (i = 0; i < NUM_RX_DMA; i++) {
640 		aup->rx_dma_ring[i] = (struct rx_dma *)
641 			(tx_base + 0x100 + sizeof(struct rx_dma) * i);
642 	}
643 	for (i = 0; i < NUM_TX_DMA; i++) {
644 		aup->tx_dma_ring[i] = (struct tx_dma *)
645 			(tx_base + sizeof(struct tx_dma) * i);
646 	}
647 }
648 
649 /*
650  * ethtool operations
651  */
652 
653 static void
654 au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
655 {
656 	struct au1000_private *aup = netdev_priv(dev);
657 
658 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
659 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
660 	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
661 		 aup->mac_id);
662 }
663 
664 static void au1000_set_msglevel(struct net_device *dev, u32 value)
665 {
	struct au1000_private *aup = netdev_priv(dev);

	aup->msg_enable = value;
668 }
669 
670 static u32 au1000_get_msglevel(struct net_device *dev)
671 {
	struct au1000_private *aup = netdev_priv(dev);

	return aup->msg_enable;
674 }
675 
676 static const struct ethtool_ops au1000_ethtool_ops = {
677 	.get_drvinfo = au1000_get_drvinfo,
678 	.get_link = ethtool_op_get_link,
679 	.get_msglevel = au1000_get_msglevel,
680 	.set_msglevel = au1000_set_msglevel,
681 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
682 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
683 };
684 
685 
686 /*
687  * Initialize the interface.
688  *
689  * When the device powers up, the clocks are disabled and the
690  * mac is in reset state.  When the interface is closed, we
691  * do the same -- reset the device and disable the clocks to
692  * conserve power. Thus, whenever au1000_init() is called,
693  * the device should already be in reset state.
694  */
695 static int au1000_init(struct net_device *dev)
696 {
697 	struct au1000_private *aup = netdev_priv(dev);
698 	unsigned long flags;
699 	int i;
700 	u32 control;
701 
702 	netif_dbg(aup, hw, dev, "au1000_init\n");
703 
704 	/* bring the device out of reset */
705 	au1000_enable_mac(dev, 1);
706 
707 	spin_lock_irqsave(&aup->lock, flags);
708 
709 	writel(0, &aup->mac->control);
710 	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
711 	aup->tx_tail = aup->tx_head;
712 	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
713 
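	/* station address: bytes 4-5 go into mac_addr_high, bytes 0-3
	 * into mac_addr_low (byte 0 in the least significant position)
	 */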
714 	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
715 					&aup->mac->mac_addr_high);
716 	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
717 		dev->dev_addr[1]<<8 | dev->dev_addr[0],
718 					&aup->mac->mac_addr_low);
719 
720 
721 	for (i = 0; i < NUM_RX_DMA; i++)
722 		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
723 
724 	wmb(); /* drain writebuffer */
725 
726 	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
727 #ifndef CONFIG_CPU_LITTLE_ENDIAN
728 	control |= MAC_BIG_ENDIAN;
729 #endif
730 	if (dev->phydev) {
731 		if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
732 			control |= MAC_FULL_DUPLEX;
733 		else
734 			control |= MAC_DISABLE_RX_OWN;
735 	} else { /* PHY-less op, assume full-duplex */
736 		control |= MAC_FULL_DUPLEX;
737 	}
738 
739 	writel(control, &aup->mac->control);
	writel(0x8100, &aup->mac->vlan1_tag); /* 802.1Q tag: activate VLAN support */
741 	wmb(); /* drain writebuffer */
742 
743 	spin_unlock_irqrestore(&aup->lock, flags);
744 	return 0;
745 }
746 
747 static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
748 {
749 	struct net_device_stats *ps = &dev->stats;
750 
751 	ps->rx_packets++;
752 	if (status & RX_MCAST_FRAME)
753 		ps->multicast++;
754 
755 	if (status & RX_ERROR) {
756 		ps->rx_errors++;
757 		if (status & RX_MISSED_FRAME)
758 			ps->rx_missed_errors++;
759 		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
760 			ps->rx_length_errors++;
761 		if (status & RX_CRC_ERROR)
762 			ps->rx_crc_errors++;
763 		if (status & RX_COLL)
764 			ps->collisions++;
	} else {
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
	}
}
769 
770 /*
771  * Au1000 receive routine.
772  */
773 static int au1000_rx(struct net_device *dev)
774 {
775 	struct au1000_private *aup = netdev_priv(dev);
776 	struct sk_buff *skb;
777 	struct rx_dma *prxd;
778 	u32 buff_stat, status;
779 	struct db_dest *pDB;
780 	u32	frmlen;
781 
782 	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
783 
784 	prxd = aup->rx_dma_ring[aup->rx_head];
785 	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		au1000_update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
793 			frmlen = (status & RX_FRAME_LEN_MASK);
794 			frmlen -= 4; /* Remove FCS */
795 			skb = netdev_alloc_skb(dev, frmlen + 2);
796 			if (skb == NULL) {
797 				dev->stats.rx_dropped++;
798 				continue;
799 			}
800 			skb_reserve(skb, 2);	/* 16 byte IP header align */
801 			skb_copy_to_linear_data(skb,
802 				(unsigned char *)pDB->vaddr, frmlen);
803 			skb_put(skb, frmlen);
804 			skb->protocol = eth_type_trans(skb, dev);
805 			netif_rx(skb);	/* pass the packet to upper layers */
806 		} else {
807 			if (au1000_debug > 4) {
808 				pr_err("rx_error(s):");
809 				if (status & RX_MISSED_FRAME)
810 					pr_cont(" miss");
811 				if (status & RX_WDOG_TIMER)
812 					pr_cont(" wdog");
813 				if (status & RX_RUNT)
814 					pr_cont(" runt");
815 				if (status & RX_OVERLEN)
816 					pr_cont(" overlen");
817 				if (status & RX_COLL)
818 					pr_cont(" coll");
819 				if (status & RX_MII_ERROR)
820 					pr_cont(" mii error");
821 				if (status & RX_CRC_ERROR)
822 					pr_cont(" crc error");
823 				if (status & RX_LEN_ERROR)
824 					pr_cont(" len error");
825 				if (status & RX_U_CNTRL_FRAME)
826 					pr_cont(" u control frame");
827 				pr_cont("\n");
828 			}
829 		}
830 		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
831 		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
832 		wmb(); /* drain writebuffer */
833 
834 		/* next descriptor */
835 		prxd = aup->rx_dma_ring[aup->rx_head];
836 		buff_stat = prxd->buff_stat;
837 	}
838 	return 0;
839 }
840 
841 static void au1000_update_tx_stats(struct net_device *dev, u32 status)
842 {
843 	struct net_device_stats *ps = &dev->stats;
844 
845 	if (status & TX_FRAME_ABORTED) {
846 		if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
847 			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
848 				/* any other tx errors are only valid
849 				 * in half duplex mode
850 				 */
851 				ps->tx_errors++;
852 				ps->tx_aborted_errors++;
853 			}
854 		} else {
855 			ps->tx_errors++;
856 			ps->tx_aborted_errors++;
857 			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
858 				ps->tx_carrier_errors++;
859 		}
860 	}
861 }
862 
863 /*
864  * Called from the interrupt service routine to acknowledge
 * the TX DONE bits.  This is a must if the irq is set up as
 * edge-triggered.
867  */
868 static void au1000_tx_ack(struct net_device *dev)
869 {
870 	struct au1000_private *aup = netdev_priv(dev);
871 	struct tx_dma *ptxd;
872 
873 	ptxd = aup->tx_dma_ring[aup->tx_tail];
874 
875 	while (ptxd->buff_stat & TX_T_DONE) {
876 		au1000_update_tx_stats(dev, ptxd->status);
877 		ptxd->buff_stat &= ~TX_T_DONE;
878 		ptxd->len = 0;
879 		wmb(); /* drain writebuffer */
880 
881 		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
882 		ptxd = aup->tx_dma_ring[aup->tx_tail];
883 
884 		if (aup->tx_full) {
885 			aup->tx_full = 0;
886 			netif_wake_queue(dev);
887 		}
888 	}
889 }
890 
891 /*
892  * Au1000 interrupt service routine.
893  */
894 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
895 {
896 	struct net_device *dev = dev_id;
897 
898 	/* Handle RX interrupts first to minimize chance of overrun */
899 
900 	au1000_rx(dev);
901 	au1000_tx_ack(dev);
	return IRQ_HANDLED;
903 }
904 
905 static int au1000_open(struct net_device *dev)
906 {
907 	int retval;
908 	struct au1000_private *aup = netdev_priv(dev);
909 
910 	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
911 
912 	retval = request_irq(dev->irq, au1000_interrupt, 0,
913 					dev->name, dev);
914 	if (retval) {
915 		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
916 		return retval;
917 	}
918 
919 	retval = au1000_init(dev);
920 	if (retval) {
921 		netdev_err(dev, "error in au1000_init\n");
922 		free_irq(dev->irq, dev);
923 		return retval;
924 	}
925 
926 	if (dev->phydev)
927 		phy_start(dev->phydev);
928 
929 	netif_start_queue(dev);
930 
931 	netif_dbg(aup, drv, dev, "open: Initialization done.\n");
932 
933 	return 0;
934 }
935 
936 static int au1000_close(struct net_device *dev)
937 {
938 	unsigned long flags;
939 	struct au1000_private *const aup = netdev_priv(dev);
940 
941 	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
942 
943 	if (dev->phydev)
944 		phy_stop(dev->phydev);
945 
946 	spin_lock_irqsave(&aup->lock, flags);
947 
948 	au1000_reset_mac_unlocked(dev);
949 
950 	/* stop the device */
951 	netif_stop_queue(dev);
952 
953 	/* disable the interrupt */
954 	free_irq(dev->irq, dev);
955 	spin_unlock_irqrestore(&aup->lock, flags);
956 
957 	return 0;
958 }
959 
960 /*
961  * Au1000 transmit routine.
962  */
963 static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
964 {
965 	struct au1000_private *aup = netdev_priv(dev);
966 	struct net_device_stats *ps = &dev->stats;
967 	struct tx_dma *ptxd;
968 	u32 buff_stat;
969 	struct db_dest *pDB;
970 	int i;
971 
	netif_dbg(aup, tx_queued, dev, "tx: aup %p len=%d, data=%p, head %d\n",
		  aup, skb->len, skb->data, aup->tx_head);
975 
976 	ptxd = aup->tx_dma_ring[aup->tx_head];
977 	buff_stat = ptxd->buff_stat;
978 	if (buff_stat & TX_DMA_ENABLE) {
979 		/* We've wrapped around and the transmitter is still busy */
980 		netif_stop_queue(dev);
981 		aup->tx_full = 1;
982 		return NETDEV_TX_BUSY;
983 	} else if (buff_stat & TX_T_DONE) {
984 		au1000_update_tx_stats(dev, ptxd->status);
985 		ptxd->len = 0;
986 	}
987 
988 	if (aup->tx_full) {
989 		aup->tx_full = 0;
990 		netif_wake_queue(dev);
991 	}
992 
993 	pDB = aup->tx_db_inuse[aup->tx_head];
994 	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
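	/* MAC_AUTO_PAD is not enabled in au1000_init(), so zero-pad
	 * short frames up to the ETH_ZLEN minimum by hand
	 */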
995 	if (skb->len < ETH_ZLEN) {
996 		for (i = skb->len; i < ETH_ZLEN; i++)
997 			((char *)pDB->vaddr)[i] = 0;
998 
999 		ptxd->len = ETH_ZLEN;
	} else {
		ptxd->len = skb->len;
	}
1002 
1003 	ps->tx_packets++;
1004 	ps->tx_bytes += ptxd->len;
1005 
1006 	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1007 	wmb(); /* drain writebuffer */
1008 	dev_kfree_skb(skb);
1009 	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1010 	return NETDEV_TX_OK;
1011 }
1012 
/*
 * The Tx ring has been full longer than the watchdog timeout
 * value. The transmitter is probably hung, so reset the MAC and
 * restart the queue.
 */
1017 static void au1000_tx_timeout(struct net_device *dev, unsigned int txqueue)
1018 {
1019 	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
1020 	au1000_reset_mac(dev);
1021 	au1000_init(dev);
1022 	netif_trans_update(dev); /* prevent tx timeout */
1023 	netif_wake_queue(dev);
1024 }
1025 
1026 static void au1000_multicast_list(struct net_device *dev)
1027 {
1028 	struct au1000_private *aup = netdev_priv(dev);
1029 	u32 reg;
1030 
1031 	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
1032 	reg = readl(&aup->mac->control);
1033 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1034 		reg |= MAC_PROMISCUOUS;
1035 	} else if ((dev->flags & IFF_ALLMULTI)  ||
1036 			   netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
1037 		reg |= MAC_PASS_ALL_MULTI;
1038 		reg &= ~MAC_PROMISCUOUS;
1039 		netdev_info(dev, "Pass all multicast\n");
1040 	} else {
1041 		struct netdev_hw_addr *ha;
1042 		u32 mc_filter[2];	/* Multicast hash filter */
1043 
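		/* 64-entry hash filter: the top 6 bits of each address's
		 * Ethernet CRC select one bit in mc_filter[]
		 */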
1044 		mc_filter[1] = mc_filter[0] = 0;
1045 		netdev_for_each_mc_addr(ha, dev)
1046 			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
1047 					(long *)mc_filter);
1048 		writel(mc_filter[1], &aup->mac->multi_hash_high);
1049 		writel(mc_filter[0], &aup->mac->multi_hash_low);
1050 		reg &= ~MAC_PROMISCUOUS;
1051 		reg |= MAC_HASH_MODE;
1052 	}
1053 	writel(reg, &aup->mac->control);
1054 }
1055 
1056 static const struct net_device_ops au1000_netdev_ops = {
1057 	.ndo_open		= au1000_open,
1058 	.ndo_stop		= au1000_close,
1059 	.ndo_start_xmit		= au1000_tx,
1060 	.ndo_set_rx_mode	= au1000_multicast_list,
1061 	.ndo_do_ioctl		= phy_do_ioctl_running,
1062 	.ndo_tx_timeout		= au1000_tx_timeout,
1063 	.ndo_set_mac_address	= eth_mac_addr,
1064 	.ndo_validate_addr	= eth_validate_addr,
1065 };
1066 
1067 static int au1000_probe(struct platform_device *pdev)
1068 {
1069 	struct au1000_private *aup = NULL;
1070 	struct au1000_eth_platform_data *pd;
1071 	struct net_device *dev = NULL;
1072 	struct db_dest *pDB, *pDBfree;
1073 	int irq, i, err = 0;
1074 	struct resource *base, *macen, *macdma;
1075 
1076 	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1077 	if (!base) {
1078 		dev_err(&pdev->dev, "failed to retrieve base register\n");
1079 		err = -ENODEV;
1080 		goto out;
1081 	}
1082 
1083 	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1084 	if (!macen) {
1085 		dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1086 		err = -ENODEV;
1087 		goto out;
1088 	}
1089 
1090 	irq = platform_get_irq(pdev, 0);
1091 	if (irq < 0) {
1092 		err = -ENODEV;
1093 		goto out;
1094 	}
1095 
1096 	macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1097 	if (!macdma) {
1098 		dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1099 		err = -ENODEV;
1100 		goto out;
1101 	}
1102 
1103 	if (!request_mem_region(base->start, resource_size(base),
1104 							pdev->name)) {
1105 		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1106 		err = -ENXIO;
1107 		goto out;
1108 	}
1109 
1110 	if (!request_mem_region(macen->start, resource_size(macen),
1111 							pdev->name)) {
1112 		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1113 		err = -ENXIO;
1114 		goto err_request;
1115 	}
1116 
1117 	if (!request_mem_region(macdma->start, resource_size(macdma),
1118 							pdev->name)) {
1119 		dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1120 		err = -ENXIO;
1121 		goto err_macdma;
1122 	}
1123 
1124 	dev = alloc_etherdev(sizeof(struct au1000_private));
1125 	if (!dev) {
1126 		err = -ENOMEM;
1127 		goto err_alloc;
1128 	}
1129 
1130 	SET_NETDEV_DEV(dev, &pdev->dev);
1131 	platform_set_drvdata(pdev, dev);
1132 	aup = netdev_priv(dev);
1133 
1134 	spin_lock_init(&aup->lock);
1135 	aup->msg_enable = (au1000_debug < 4 ?
1136 				AU1000_DEF_MSG_ENABLE : au1000_debug);
1137 
1138 	/* Allocate the data buffers
1139 	 * Snooping works fine with eth on all au1xxx
1140 	 */
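	/* one contiguous area; it is carved into MAX_BUF_SIZE chunks,
	 * one per tx/rx buffer descriptor, further down in this function
	 */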
1141 	aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
1142 					  (NUM_TX_BUFFS + NUM_RX_BUFFS),
1143 					  &aup->dma_addr, 0,
1144 					  DMA_ATTR_NON_CONSISTENT);
1145 	if (!aup->vaddr) {
1146 		dev_err(&pdev->dev, "failed to allocate data buffers\n");
1147 		err = -ENOMEM;
1148 		goto err_vaddr;
1149 	}
1150 
1151 	/* aup->mac is the base address of the MAC's registers */
1152 	aup->mac = (struct mac_reg *)
1153 			ioremap(base->start, resource_size(base));
1154 	if (!aup->mac) {
1155 		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1156 		err = -ENXIO;
1157 		goto err_remap1;
1158 	}
1159 
1160 	/* Setup some variables for quick register address access */
1161 	aup->enable = (u32 *)ioremap(macen->start,
1162 						resource_size(macen));
1163 	if (!aup->enable) {
1164 		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1165 		err = -ENXIO;
1166 		goto err_remap2;
1167 	}
1168 	aup->mac_id = pdev->id;
1169 
1170 	aup->macdma = ioremap(macdma->start, resource_size(macdma));
1171 	if (!aup->macdma) {
1172 		dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1173 		err = -ENXIO;
1174 		goto err_remap3;
1175 	}
1176 
1177 	au1000_setup_hw_rings(aup, aup->macdma);
1178 
1179 	writel(0, aup->enable);
1180 	aup->mac_enabled = 0;
1181 
1182 	pd = dev_get_platdata(&pdev->dev);
1183 	if (!pd) {
		dev_info(&pdev->dev,
			 "no platform_data passed, PHY search on MAC0\n");
1186 		aup->phy1_search_mac0 = 1;
1187 	} else {
1188 		if (is_valid_ether_addr(pd->mac)) {
1189 			memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
1190 		} else {
			/* no valid MAC given by platform_data; use a random one */
1192 			eth_hw_addr_random(dev);
1193 		}
1194 
1195 		aup->phy_static_config = pd->phy_static_config;
1196 		aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1197 		aup->phy1_search_mac0 = pd->phy1_search_mac0;
1198 		aup->phy_addr = pd->phy_addr;
1199 		aup->phy_busid = pd->phy_busid;
1200 		aup->phy_irq = pd->phy_irq;
1201 	}
1202 
1203 	if (aup->phy_busid > 0) {
		dev_err(&pdev->dev, "MAC0-associated PHY attached to the 2nd MAC's MII bus is not supported yet\n");
1205 		err = -ENODEV;
1206 		goto err_mdiobus_alloc;
1207 	}
1208 
1209 	aup->mii_bus = mdiobus_alloc();
1210 	if (aup->mii_bus == NULL) {
1211 		dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1212 		err = -ENOMEM;
1213 		goto err_mdiobus_alloc;
1214 	}
1215 
1216 	aup->mii_bus->priv = dev;
1217 	aup->mii_bus->read = au1000_mdiobus_read;
1218 	aup->mii_bus->write = au1000_mdiobus_write;
1219 	aup->mii_bus->reset = au1000_mdiobus_reset;
1220 	aup->mii_bus->name = "au1000_eth_mii";
1221 	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1222 		pdev->name, aup->mac_id);
1223 
	/* if known, set corresponding PHY IRQs */
	if (aup->phy_static_config &&
	    aup->phy_irq && aup->phy_busid == aup->mac_id)
		aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1228 
1229 	err = mdiobus_register(aup->mii_bus);
1230 	if (err) {
1231 		dev_err(&pdev->dev, "failed to register MDIO bus\n");
1232 		goto err_mdiobus_reg;
1233 	}
1234 
1235 	err = au1000_mii_probe(dev);
1236 	if (err != 0)
1237 		goto err_out;
1238 
1239 	pDBfree = NULL;
1240 	/* setup the data buffer descriptors and attach a buffer to each one */
1241 	pDB = aup->db;
1242 	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1243 		pDB->pnext = pDBfree;
1244 		pDBfree = pDB;
1245 		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1246 		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1247 		pDB++;
1248 	}
1249 	aup->pDBfree = pDBfree;
1250 
1251 	err = -ENODEV;
1252 	for (i = 0; i < NUM_RX_DMA; i++) {
1253 		pDB = au1000_GetFreeDB(aup);
1254 		if (!pDB)
1255 			goto err_out;
1256 
1257 		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1258 		aup->rx_db_inuse[i] = pDB;
1259 	}
1260 
1261 	err = -ENODEV;
1262 	for (i = 0; i < NUM_TX_DMA; i++) {
1263 		pDB = au1000_GetFreeDB(aup);
1264 		if (!pDB)
1265 			goto err_out;
1266 
1267 		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1268 		aup->tx_dma_ring[i]->len = 0;
1269 		aup->tx_db_inuse[i] = pDB;
1270 	}
1271 
1272 	dev->base_addr = base->start;
1273 	dev->irq = irq;
1274 	dev->netdev_ops = &au1000_netdev_ops;
1275 	dev->ethtool_ops = &au1000_ethtool_ops;
1276 	dev->watchdog_timeo = ETH_TX_TIMEOUT;
1277 
1278 	/*
1279 	 * The boot code uses the ethernet controller, so reset it to start
1280 	 * fresh.  au1000_init() expects that the device is in reset state.
1281 	 */
1282 	au1000_reset_mac(dev);
1283 
1284 	err = register_netdev(dev);
1285 	if (err) {
1286 		netdev_err(dev, "Cannot register net device, aborting.\n");
1287 		goto err_out;
1288 	}
1289 
1290 	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1291 			(unsigned long)base->start, irq);
1292 
1293 	pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1294 
1295 	return 0;
1296 
1297 err_out:
1298 	if (aup->mii_bus != NULL)
1299 		mdiobus_unregister(aup->mii_bus);
1300 
1301 	/* here we should have a valid dev plus aup-> register addresses
1302 	 * so we can reset the mac properly.
1303 	 */
1304 	au1000_reset_mac(dev);
1305 
1306 	for (i = 0; i < NUM_RX_DMA; i++) {
1307 		if (aup->rx_db_inuse[i])
1308 			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1309 	}
1310 	for (i = 0; i < NUM_TX_DMA; i++) {
1311 		if (aup->tx_db_inuse[i])
1312 			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1313 	}
1314 err_mdiobus_reg:
1315 	mdiobus_free(aup->mii_bus);
1316 err_mdiobus_alloc:
1317 	iounmap(aup->macdma);
1318 err_remap3:
1319 	iounmap(aup->enable);
1320 err_remap2:
1321 	iounmap(aup->mac);
1322 err_remap1:
1323 	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1324 			(void *)aup->vaddr, aup->dma_addr,
1325 			DMA_ATTR_NON_CONSISTENT);
1326 err_vaddr:
1327 	free_netdev(dev);
1328 err_alloc:
1329 	release_mem_region(macdma->start, resource_size(macdma));
1330 err_macdma:
1331 	release_mem_region(macen->start, resource_size(macen));
1332 err_request:
1333 	release_mem_region(base->start, resource_size(base));
1334 out:
1335 	return err;
1336 }
1337 
1338 static int au1000_remove(struct platform_device *pdev)
1339 {
1340 	struct net_device *dev = platform_get_drvdata(pdev);
1341 	struct au1000_private *aup = netdev_priv(dev);
1342 	int i;
1343 	struct resource *base, *macen;
1344 
1345 	unregister_netdev(dev);
1346 	mdiobus_unregister(aup->mii_bus);
1347 	mdiobus_free(aup->mii_bus);
1348 
1349 	for (i = 0; i < NUM_RX_DMA; i++)
1350 		if (aup->rx_db_inuse[i])
1351 			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1352 
1353 	for (i = 0; i < NUM_TX_DMA; i++)
1354 		if (aup->tx_db_inuse[i])
1355 			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1356 
1357 	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1358 			(void *)aup->vaddr, aup->dma_addr,
1359 			DMA_ATTR_NON_CONSISTENT);
1360 
1361 	iounmap(aup->macdma);
1362 	iounmap(aup->mac);
1363 	iounmap(aup->enable);
1364 
1365 	base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1366 	release_mem_region(base->start, resource_size(base));
1367 
1368 	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1369 	release_mem_region(base->start, resource_size(base));
1370 
1371 	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1372 	release_mem_region(macen->start, resource_size(macen));
1373 
1374 	free_netdev(dev);
1375 
1376 	return 0;
1377 }
1378 
1379 static struct platform_driver au1000_eth_driver = {
1380 	.probe  = au1000_probe,
1381 	.remove = au1000_remove,
1382 	.driver = {
1383 		.name   = "au1000-eth",
1384 	},
1385 };
1386 
1387 module_platform_driver(au1000_eth_driver);
1388 
1389 MODULE_ALIAS("platform:au1000-eth");
1390