/* xref: /openbmc/u-boot/drivers/net/mvgbe.c (revision 713cb680) */
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/kirkwood.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#elif defined(CONFIG_DOVE)
#include <asm/arch/dove.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

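/*
 * 0xee is a pseudo PHY address/register value: when both the PHY address
 * and the register offset passed to the SMI helpers equal
 * MV_PHY_ADR_REQUEST, the request is redirected to the port's phyadr
 * register instead of going out on the SMI bus.
 */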
#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns 0 on success and stores the 16-bit register value in *data;
 * returns -EFAULT on invalid parameters or timeout.
 */
static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the PHY address configured for this port */
		*data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return 0;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__FUNCTION__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__FUNCTION__, reg_ofs);
		return -EFAULT;
	}

	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__FUNCTION__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	*data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
		reg_ofs, *data);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeds, -EINVAL on bad parameters and
 * -ETIME on timeout.
 */
static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
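	/* clearing the read opcode bit leaves the SMI write opcode selected */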
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
#endif

#if defined(CONFIG_PHYLIB)
int mvgbe_phy_read(struct mii_dev *bus, int phy_addr, int dev_addr,
		   int reg_addr)
{
	u16 data;
	int ret;
	ret = smi_reg_read(bus->name, phy_addr, reg_addr, &data);
	if (ret)
		return ret;
	return data;
}

int mvgbe_phy_write(struct mii_dev *bus, int phy_addr, int dev_addr,
		    int reg_addr, u16 data)
{
	return smi_reg_write(bus->name, phy_addr, reg_addr, data);
}
#endif

/* Stop any active queues on the given queue command register and wait */
static void stop_queue(u32 * qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

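	/*
	 * The low byte of the queue command register holds the per-queue
	 * enable bits; writing those bits shifted into the high byte (the
	 * disable field) asks the controller to stop the active queues.
	 */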
	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Configure address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
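	/* The size field encodes (window size / 64 KiB) - 1 in bits [31:16] */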
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
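	/* A set bit in BARE disables the window, so clear it to enable it */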
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window(EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set an entry in the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters. It is used to add and remove MAC addresses from the port
 * unicast address table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option	ACCEPT_MAC_ADDR to add the address,
 *		REJECT_MAC_ADDR to remove it.
 *
 * Returns 1 on success, or 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port unicast (MAC) address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 * p_addr)
{
	u32 mac_h;
	u32 mac_l;

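	/* macah holds the first four bytes of the MAC, macal the last two */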
	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out an Rx descriptor ring and its buffers.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
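		/* Link descriptors into a ring: the last one wraps to the first */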
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
	 && defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this
	 * disables the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Point the current Rx descriptor pointer (CRDP) of RXUQ at the ring */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder windows */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Make sure the port is not held in reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Programs net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Bounce misaligned frames through the 8-byte-aligned Tx buffer */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

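	/* Build one Tx descriptor covering the whole frame for the DMA engine */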
	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as the head of the zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * Wait for transmit completion: the DMA engine clears
	 * MVGBE_BUFFER_OWNED_BY_DMA in cmd_sts once the frame has been
	 * sent (or an error has been flagged).
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __FUNCTION__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until an rx packet is available or the spin budget expires */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __FUNCTION__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If the packet arrived without both the first and last descriptor
	 * bits set, or with the error summary bit set, it needs to be
	 * dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __FUNCTION__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__FUNCTION__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (NetReceive)\n", __FUNCTION__);

		/*
		 * Let the upper layer handle the packet; skip the first
		 * RX_BUF_OFFSET bytes of the receive buffer.
		 */
		NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
			(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
	}
	/*
	 * Return the descriptor to the DMA engine and advance the driver's
	 * current pointer to the next descriptor in the ring.
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}

#if defined(CONFIG_PHYLIB)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = mvgbe_phy_read;
	bus->write = mvgbe_phy_write;
	sprintf(bus->name, "%s", dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	/* Set phy address of the port */
	mvgbe_phy_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);

	phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return 0;
}
#endif

int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
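	/* CONFIG_MVGBE_PORTS is a board-provided initializer list, e.g. {1, 0} */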
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip ports that are configured as unused */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

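		/*
		 * On any allocation failure above, control jumps into the
		 * error labels below so everything already allocated is
		 * freed before returning.
		 */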
		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__FUNCTION__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__FUNCTION__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

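		/*
		 * Hook the port up to the PHY layer: with PHYLIB an mdio bus
		 * and phy_device are created, otherwise the raw SMI
		 * read/write callbacks are registered with miiphy.
		 */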
#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, smi_reg_read, smi_reg_write);
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}