// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#elif defined(CONFIG_DOVE)
#include <asm/arch/dove.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_MVGBE_PORTS
# define CONFIG_MVGBE_PORTS {0, 0}
#endif

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
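
/*
 * Note: MV_PHY_ADR_REQUEST (0xee) is not a real PHY address.  The SMI
 * callbacks below treat an access with both the PHY address and the
 * register offset set to this value as a request to read or write the
 * port's own PHYADR register instead of doing an SMI bus transaction.
 */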

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns 16bit phy register value, or -EFAULT on error
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	u16 data = 0;
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the port's currently configured PHY address */
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL on bad parameters,
 * -ETIME on timeout
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
#endif
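
/*
 * Illustrative use of the callbacks above (a sketch, not driver code):
 * once the MII bus is registered in mvgbe_initialize(), PHY registers
 * can be accessed through the generic miiphy layer, e.g.
 *
 *	u16 val;
 *
 *	miiphy_read("egiga0", 8, MII_BMSR, &val);
 *
 * where the device name "egiga0" and PHY address 8 are assumptions made
 * for this example only.
 */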

/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Configure address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER): a cleared bit enables the window */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}
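
/*
 * Size-register encoding example (illustrative): a 256 MiB window yields
 * ((0x10000000 / 0x10000) - 1) << 16 = 0x0fff0000, i.e. the upper 16 bits
 * hold the window size in 64 KiB units, minus one.
 */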

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - This function sets the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to function
 * parameters.
 * This function adds/removes MAC addresses from the port unicast address
 * table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option      0 = Add, 1 = remove address.
 *
 * RETURN: 1 if the operation succeeded, 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}
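
/*
 * Indexing example (illustrative): for a MAC address ending in 0x35 the
 * last nibble is 0x5, so the entry lives in dfut[5 / 4] = dfut[1], at
 * byte offset 5 % 4 = 1 within that register.
 */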

/*
 * port_uc_addr_set - This function sets the port Unicast address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 *p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1)) {
			/* Close the ring: last descriptor points to the first */
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		} else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}
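
/*
 * Resulting layout (illustrative): RINGSZ descriptors spaced
 * MV_RXQ_DESC_ALIGNED_SIZE bytes apart, with descriptor i pointing at
 * offset i * PKTSIZE_ALIGN inside the contiguous p_rxbuf area allocated
 * in mvgbe_initialize(), and the last descriptor wrapping back to the
 * first.
 */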

static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) &&  \
	!defined(CONFIG_PHYLIB) &&			 \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Make sure the port is not held in reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Programs net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tc desc as zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until an rx packet is available or a timeout occurs */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If a packet was received without both the first/last descriptor
	 * bits set, or with the error summary bit set, the packet needs
	 * to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		/* let the upper layer handle the packet */
		net_process_received_packet((p_rxdesc_curr->buf_ptr +
					     RX_BUF_OFFSET),
					    (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET));
	}
	/*
	 * free these descriptors and point next in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}

#if defined(CONFIG_PHYLIB)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	strcpy(bus->name, dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	/* Set phy address of the port */
	smi_reg_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);

	phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return 0;
}
#endif

int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		int retval;
		struct mii_dev *mdiodev = mdio_alloc();
		if (!mdiodev)
			return -ENOMEM;
		strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
		mdiodev->read = smi_reg_read;
		mdiodev->write = smi_reg_write;

		retval = mdio_register(mdiodev);
		if (retval < 0)
			return retval;
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}
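
/*
 * Typical board integration (an illustrative sketch, not part of this
 * driver): a board configuration header enables the ports, e.g.
 *
 *	#define CONFIG_MVGBE_PORTS	{1, 0}	(enable port 0 only)
 *
 * and SoC/board code then calls mvgbe_initialize(bis) from its
 * cpu_eth_init() or board_eth_init() hook.  The exact config value and
 * call site are board specific and are assumptions made for this example.
 */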