/*
 * Copyright (C) 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2011 PetaLogix
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <net.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>

#if !defined(CONFIG_PHYLIB)
# error AXI_ETHERNET requires PHYLIB
#endif

/* Link setup */
#define XAE_EMMC_LINKSPEED_MASK	0xC0000000 /* Link speed */
#define XAE_EMMC_LINKSPD_10	0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100	0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000	0x80000000 /* Link Speed mask for 1000 Mbit */

/* Interrupt Status/Enable/Mask Registers bit definitions */
#define XAE_INT_RXRJECT_MASK	0x00000008 /* Rx frame rejected */
#define XAE_INT_MGTRDY_MASK	0x00000080 /* MGT clock Lock */

/* Receive Configuration Word 1 (RCW1) Register bit definitions */
#define XAE_RCW1_RX_MASK	0x10000000 /* Receiver enable */

/* Transmitter Configuration (TC) Register bit definitions */
#define XAE_TC_TX_MASK		0x10000000 /* Transmitter enable */

#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF

/* MDIO Management Configuration (MC) Register bit definitions */
#define XAE_MDIO_MC_MDIOEN_MASK		0x00000040 /* MII management enable */

/* MDIO Management Control Register (MCR) Register bit definitions */
#define XAE_MDIO_MCR_PHYAD_MASK		0x1F000000 /* Phy Address Mask */
#define XAE_MDIO_MCR_PHYAD_SHIFT	24	   /* Phy Address Shift */
#define XAE_MDIO_MCR_REGAD_MASK		0x001F0000 /* Reg Address Mask */
#define XAE_MDIO_MCR_REGAD_SHIFT	16	   /* Reg Address Shift */
#define XAE_MDIO_MCR_OP_READ_MASK	0x00008000 /* Op Code Read Mask */
#define XAE_MDIO_MCR_OP_WRITE_MASK	0x00004000 /* Op Code Write Mask */
#define XAE_MDIO_MCR_INITIATE_MASK	0x00000800 /* Initiate Mask */
#define XAE_MDIO_MCR_READY_MASK		0x00000080 /* Ready Mask */

#define XAE_MDIO_DIV_DFT	29	/* Default MDIO clock divisor */
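/*
 * Note: assuming the usual Axi Ethernet MDIO setup, where the management
 * clock is roughly host_clk / ((divisor + 1) * 2), a divisor of 29 keeps
 * MDC below the 2.5 MHz IEEE 802.3 limit for AXI clocks up to ~150 MHz.
 * Verify this against the actual clock used in your design.
 */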

/* DMA macros */
/* Bitmasks of XAXIDMA_CR_OFFSET register */
#define XAXIDMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */

/* Bitmasks of XAXIDMA_SR_OFFSET register */
#define XAXIDMA_HALTED_MASK	0x00000001  /* DMA channel halted */

/* Bitmask for interrupts */
#define XAXIDMA_IRQ_IOC_MASK	0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ALL_MASK	0x00007000 /* All interrupts */

/* Bitmasks of XAXIDMA_BD_CTRL_OFFSET register */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */

#define DMAALIGN	128

static u8 rxframe[PKTSIZE_ALIGN] __attribute__((aligned(DMAALIGN)));

/* Mirrors the register layout of one AXI DMA channel */
struct axidma_reg {
	u32 control; /* DMACR */
	u32 status; /* DMASR */
	u32 current; /* CURDESC */
	u32 reserved;
	u32 tail; /* TAILDESC */
};

/* Private driver structures */
struct axidma_priv {
	struct axidma_reg *dmatx;
	struct axidma_reg *dmarx;
	int phyaddr;

	struct phy_device *phydev;
	struct mii_dev *bus;
};

/* BD descriptors */
struct axidma_bd {
	u32 next;	/* Next descriptor pointer */
	u32 reserved1;
	u32 phys;	/* Buffer address */
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;	/* Control */
	u32 status;	/* Status */
	u32 app0;
	u32 app1;	/* TX start << 16 | insert */
	u32 app2;	/* TX csum seed */
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

/* Static BDs - driver uses only one BD */
static struct axidma_bd tx_bd __attribute__((aligned(DMAALIGN)));
static struct axidma_bd rx_bd __attribute__((aligned(DMAALIGN)));

struct axi_regs {
	u32 reserved[3];
	u32 is; /* 0xC: Interrupt status */
	u32 reserved2;
	u32 ie; /* 0x14: Interrupt enable */
	u32 reserved3[251];
	u32 rcw1; /* 0x404: Rx Configuration Word 1 */
	u32 tc; /* 0x408: Tx Configuration */
	u32 reserved4;
	u32 emmc; /* 0x410: EMAC mode configuration */
	u32 reserved5[59];
	u32 mdio_mc; /* 0x500: MII Management Config */
	u32 mdio_mcr; /* 0x504: MII Management Control */
	u32 mdio_mwd; /* 0x508: MII Management Write Data */
	u32 mdio_mrd; /* 0x50C: MII Management Read Data */
	u32 reserved6[124];
	u32 uaw0; /* 0x700: Unicast address word 0 */
	u32 uaw1; /* 0x704: Unicast address word 1 */
};

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG  1

/*
 * Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *  0x1000: 10Mbps full duplex support
 *  0x0800: 10Mbps half duplex support
 *  0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK 0x1808

static inline int mdio_wait(struct eth_device *dev)
{
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;
	u32 timeout = 200;

	/* Wait till MDIO interface is ready to accept a new transaction. */
	while (timeout && (!(in_be32(&regs->mdio_mcr)
						& XAE_MDIO_MCR_READY_MASK))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}
	return 0;
}

static u32 phyread(struct eth_device *dev, u32 phyaddress, u32 registernum,
								u16 *val)
{
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(dev))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
			XAE_MDIO_MCR_PHYAD_MASK) |
			((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
			& XAE_MDIO_MCR_REGAD_MASK) |
			XAE_MDIO_MCR_INITIATE_MASK |
			XAE_MDIO_MCR_OP_READ_MASK;

	out_be32(&regs->mdio_mcr, mdioctrlreg);

	if (mdio_wait(dev))
		return 1;

	/* Read data */
	*val = in_be32(&regs->mdio_mrd);
	return 0;
}

static u32 phywrite(struct eth_device *dev, u32 phyaddress, u32 registernum,
								u32 data)
{
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(dev))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
			XAE_MDIO_MCR_PHYAD_MASK) |
			((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
			& XAE_MDIO_MCR_REGAD_MASK) |
			XAE_MDIO_MCR_INITIATE_MASK |
			XAE_MDIO_MCR_OP_WRITE_MASK;

	/* Write data */
	out_be32(&regs->mdio_mwd, data);

	out_be32(&regs->mdio_mcr, mdioctrlreg);

	if (mdio_wait(dev))
		return 1;

	return 0;
}

/* Set up the AXI EMAC and the PHY according to the negotiated link speed */
static int setup_phy(struct eth_device *dev)
{
	u16 phyreg;
	int i;
	u32 speed, emmc_reg, ret;
	struct axidma_priv *priv = dev->priv;
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;
	struct phy_device *phydev;

	u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	if (priv->phyaddr == -1) {
		/* Detect the PHY address */
		for (i = 31; i >= 0; i--) {
			ret = phyread(dev, i, PHY_DETECT_REG, &phyreg);
			if (!ret && (phyreg != 0xFFFF) &&
			((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("axiemac: Found valid phy address, %x\n",
									i);
				break;
			}
		}
	}

	/* Interface - look at tsec */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, 0);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	phy_config(phydev);
	if (phy_startup(phydev)) {
		printf("axiemac: could not initialize PHY %s\n",
		       phydev->dev->name);
		return 0;
	}
	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	switch (phydev->speed) {
	case 1000:
		speed = XAE_EMMC_LINKSPD_1000;
		break;
	case 100:
		speed = XAE_EMMC_LINKSPD_100;
		break;
	case 10:
		speed = XAE_EMMC_LINKSPD_10;
		break;
	default:
		return 0;
	}

	/* Setup the emac for the phy speed */
	emmc_reg = in_be32(&regs->emmc);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
	emmc_reg |= speed;

	/* Write new speed setting out to Axi Ethernet */
	out_be32(&regs->emmc, emmc_reg);

	/*
	 * Setting the operating speed of the MAC needs a delay. There
	 * doesn't seem to be a register to poll, so please consider this
	 * during your application design.
	 */
	udelay(1);

	return 1;
}

/* STOP DMA transfers */
static void axiemac_halt(struct eth_device *dev)
{
	struct axidma_priv *priv = dev->priv;
	u32 temp;

	/* Stop the hardware */
	temp = in_be32(&priv->dmatx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	out_be32(&priv->dmatx->control, temp);

	temp = in_be32(&priv->dmarx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	out_be32(&priv->dmarx->control, temp);

	debug("axiemac: Halted\n");
}

static int axi_ethernet_init(struct eth_device *dev)
{
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;
	u32 timeout = 200;

	/*
	 * Check the status of the MgtRdy bit in the interrupt status
	 * registers. This must be done to allow the MGT clock to become stable
	 * for the Sgmii and 1000BaseX PHY interfaces. No other register reads
	 * will be valid until this bit is valid.
	 * The bit is always a 1 for all other PHY interfaces.
	 */
	while (timeout && (!(in_be32(&regs->is) & XAE_INT_MGTRDY_MASK))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	/* Stop the device and reset HW */
	/* Disable interrupts */
	out_be32(&regs->ie, 0);

	/* Disable the receiver */
	out_be32(&regs->rcw1, in_be32(&regs->rcw1) & ~XAE_RCW1_RX_MASK);

	/*
	 * Stopping the receiver in mid-packet causes a dropped packet
	 * indication from HW. Clear it.
	 */
	/* Set the interrupt status register to clear the interrupt */
	out_be32(&regs->is, XAE_INT_RXRJECT_MASK);

	/* Setup HW */
	/* Set default MDIO divisor */
	out_be32(&regs->mdio_mc, XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK);

	debug("axiemac: InitHw done\n");
	return 0;
}

static int axiemac_setup_mac(struct eth_device *dev)
{
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;

	/* Set the MAC address */
	u32 val = ((u32)dev->enetaddr[3] << 24) | (dev->enetaddr[2] << 16) |
		  (dev->enetaddr[1] << 8) | dev->enetaddr[0];
	out_be32(&regs->uaw0, val);

	val = (dev->enetaddr[5] << 8) | dev->enetaddr[4];
	val |= in_be32(&regs->uaw1) & ~XAE_UAW1_UNICASTADDR_MASK;
	out_be32(&regs->uaw1, val);
	return 0;
}

/* Reset DMA engine */
static void axi_dma_init(struct eth_device *dev)
{
	struct axidma_priv *priv = dev->priv;
	u32 timeout = 500;

	/* Reset the engine so the hardware starts from a known state */
	out_be32(&priv->dmatx->control, XAXIDMA_CR_RESET_MASK);
	out_be32(&priv->dmarx->control, XAXIDMA_CR_RESET_MASK);

	/* At the initialization time, hardware should finish reset quickly */
	while (timeout) {
		/* Check transmit/receive channel */
		/* Reset is done when the reset bit is low */
		if (!((in_be32(&priv->dmatx->control) |
				in_be32(&priv->dmarx->control))
						& XAXIDMA_CR_RESET_MASK)) {
			break;
		}
		timeout--;
	}
	if (!timeout)
		printf("%s: Timeout\n", __func__);
}

static int axiemac_init(struct eth_device *dev, bd_t *bis)
{
	struct axidma_priv *priv = dev->priv;
	struct axi_regs *regs = (struct axi_regs *)dev->iobase;
	u32 temp;

	debug("axiemac: Init started\n");
	/*
	 * Initialize AXIDMA engine. AXIDMA engine must be initialized before
	 * AxiEthernet. During AXIDMA engine initialization, AXIDMA hardware is
	 * reset, and since AXIDMA reset line is connected to AxiEthernet, this
	 * would ensure a reset of AxiEthernet.
	 */
	axi_dma_init(dev);

	/* Initialize AxiEthernet hardware. */
	if (axi_ethernet_init(dev))
		return -1;

	/* Disable all RX interrupts before RxBD space setup */
	temp = in_be32(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	out_be32(&priv->dmarx->control, temp);

	/* Point the DMA RX channel at the RX BD; the channel is started below */
	out_be32(&priv->dmarx->current, (u32)&rx_bd);

	/* Setup the BD. */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next = (u32)&rx_bd;
	rx_bd.phys = (u32)&rxframe;
	rx_bd.cntrl = sizeof(rxframe);
	/* Flush the RX BD so the DMA core can see the updates */
	flush_cache((u32)&rx_bd, sizeof(rx_bd));

	/*
	 * It is necessary to flush rxframe, because otherwise the cache
	 * can contain uninitialized data.
	 */
	flush_cache((u32)&rxframe, sizeof(rxframe));

	/* Start the hardware */
	temp = in_be32(&priv->dmarx->control);
	temp |= XAXIDMA_CR_RUNSTOP_MASK;
	out_be32(&priv->dmarx->control, temp);

	/* Rx BD is ready - start */
	out_be32(&priv->dmarx->tail, (u32)&rx_bd);

	/* Enable TX */
	out_be32(&regs->tc, XAE_TC_TX_MASK);
	/* Enable RX */
	out_be32(&regs->rcw1, XAE_RCW1_RX_MASK);

	/* PHY setup */
	if (!setup_phy(dev)) {
		axiemac_halt(dev);
		return -1;
	}

	debug("axiemac: Init complete\n");
	return 0;
}

static int axiemac_send(struct eth_device *dev, void *ptr, int len)
{
	struct axidma_priv *priv = dev->priv;
	u32 timeout;

	if (len > PKTSIZE_ALIGN)
		len = PKTSIZE_ALIGN;

	/* Flush the packet to main memory so it can be transferred by DMA */
	flush_cache((u32)ptr, len);

	/* Setup Tx BD */
	memset(&tx_bd, 0, sizeof(tx_bd));
	/* At the end of the ring, link the last BD back to the top */
	tx_bd.next = (u32)&tx_bd;
	tx_bd.phys = (u32)ptr;
	/* Save len */
	tx_bd.cntrl = len | XAXIDMA_BD_CTRL_TXSOF_MASK |
						XAXIDMA_BD_CTRL_TXEOF_MASK;

	/* Flush the TX BD so the DMA core can see the updates */
	flush_cache((u32)&tx_bd, sizeof(tx_bd));

	if (in_be32(&priv->dmatx->status) & XAXIDMA_HALTED_MASK) {
		u32 temp;
		out_be32(&priv->dmatx->current, (u32)&tx_bd);
		/* Start the hardware */
		temp = in_be32(&priv->dmatx->control);
		temp |= XAXIDMA_CR_RUNSTOP_MASK;
		out_be32(&priv->dmatx->control, temp);
	}

	/* Start transfer */
	out_be32(&priv->dmatx->tail, (u32)&tx_bd);

	/* Wait for transmission to complete */
	debug("axiemac: Waiting for tx to be done\n");
	timeout = 200;
	while (timeout && (!(in_be32(&priv->dmatx->status) &
			(XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	debug("axiemac: Sending complete\n");
	return 0;
}

static int isrxready(struct eth_device *dev)
{
	u32 status;
	struct axidma_priv *priv = dev->priv;

	/* Read pending interrupts */
	status = in_be32(&priv->dmarx->status);

	/* Acknowledge pending interrupts */
	out_be32(&priv->dmarx->status, status & XAXIDMA_IRQ_ALL_MASK);

	/*
	 * If the completion (IOC) or delay interrupt is asserted, a frame
	 * has been received and the RX BD is ready to be processed.
	 */
	if ((status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))
		return 1;

	return 0;
}

static int axiemac_recv(struct eth_device *dev)
{
	u32 length;
	struct axidma_priv *priv = dev->priv;
	u32 temp;

	/* Wait for an incoming packet */
	if (!isrxready(dev))
		return 0;

	debug("axiemac: RX data ready\n");

	/* Disable IRQ for a moment till packet is handled */
	temp = in_be32(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	out_be32(&priv->dmarx->control, temp);

	length = rx_bd.app4 & 0xFFFF; /* Frame length is in app4 bits 15:0 */
#ifdef DEBUG
	print_buffer(&rxframe, &rxframe[0], 1, length, 16);
#endif
	/* Pass the received frame up for processing */
	if (length)
		net_process_received_packet(rxframe, length);

#ifdef DEBUG
	/* It is useful to clear the buffer to be sure that it is consistent */
	memset(rxframe, 0, sizeof(rxframe));
#endif
	/* Setup RxBD */
	/* Clear the whole buffer and setup it again - all flags are cleared */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next = (u32)&rx_bd;
	rx_bd.phys = (u32)&rxframe;
	rx_bd.cntrl = sizeof(rxframe);

	/* Write bd to HW */
	flush_cache((u32)&rx_bd, sizeof(rx_bd));

	/*
	 * It is necessary to flush rxframe, because otherwise the cache
	 * will still contain the previous packet.
	 */
	flush_cache((u32)&rxframe, sizeof(rxframe));

	/* Rx BD is ready - start again */
	out_be32(&priv->dmarx->tail, (u32)&rx_bd);

	debug("axiemac: RX completed, framelength = %d\n", length);

	return length;
}

static int axiemac_miiphy_read(const char *devname, uchar addr,
							uchar reg, ushort *val)
{
	struct eth_device *dev = eth_get_dev();
	u32 ret;

	ret = phyread(dev, addr, reg, val);
	debug("axiemac: Read MII 0x%x, 0x%x, 0x%x\n", addr, reg, *val);
	return ret;
}

static int axiemac_miiphy_write(const char *devname, uchar addr,
							uchar reg, ushort val)
{
	struct eth_device *dev = eth_get_dev();

	debug("axiemac: Write MII 0x%x, 0x%x, 0x%x\n", addr, reg, val);
	return phywrite(dev, addr, reg, val);
}

static int axiemac_bus_reset(struct mii_dev *bus)
{
	debug("axiemac: Bus reset\n");
	return 0;
}

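/*
 * Example board hook-up (illustrative sketch only): a board would typically
 * register this driver from its ethernet init hook, passing the AXI Ethernet
 * and AXI DMA base addresses taken from the board configuration, e.g.
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return xilinx_axiemac_initialize(bis, XILINX_AXIEMAC_BASEADDR,
 *						 XILINX_AXIDMA_BASEADDR);
 *	}
 *
 * The XILINX_AXIEMAC_BASEADDR/XILINX_AXIDMA_BASEADDR names above are only
 * placeholders for the board's actual configuration values.
 */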
int xilinx_axiemac_initialize(bd_t *bis, unsigned long base_addr,
							unsigned long dma_addr)
{
	struct eth_device *dev;
	struct axidma_priv *priv;

	dev = calloc(1, sizeof(struct eth_device));
	if (dev == NULL)
		return -1;

	dev->priv = calloc(1, sizeof(struct axidma_priv));
	if (dev->priv == NULL) {
		free(dev);
		return -1;
	}
	priv = dev->priv;

	sprintf(dev->name, "aximac.%lx", base_addr);

	dev->iobase = base_addr;
	priv->dmatx = (struct axidma_reg *)dma_addr;
	/* RX channel offset is 0x30 */
	priv->dmarx = (struct axidma_reg *)(dma_addr + 0x30);
	dev->init = axiemac_init;
	dev->halt = axiemac_halt;
	dev->send = axiemac_send;
	dev->recv = axiemac_recv;
	dev->write_hwaddr = axiemac_setup_mac;

#ifdef CONFIG_PHY_ADDR
	priv->phyaddr = CONFIG_PHY_ADDR;
#else
	priv->phyaddr = -1;
#endif

	eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(dev->name, axiemac_miiphy_read, axiemac_miiphy_write);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	priv->bus->reset = axiemac_bus_reset;
#endif
	return 1;
}