/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#elif defined(CONFIG_DOVE)
#include <asm/arch/dove.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_MVGBE_PORTS
# define CONFIG_MVGBE_PORTS {0, 0}
#endif

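/*
 * Passing MV_PHY_ADR_REQUEST as both the PHY address and the register offset
 * makes the SMI callbacks below access the controller's own PHY address
 * register instead of a PHY register.  Note that all ports go through the
 * SMI register of the first controller (MVGBE0).
 */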
#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or -EFAULT on error
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	u16 data = 0;
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the PHY address currently programmed for the port */
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL on bad parameters,
 * or -ETIME on timeout
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
#endif

/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		}
		while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameter struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
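	/* The size is programmed in 64 KiB units: (size / 64K - 1) in bits [31:16] */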
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
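	/*
	 * A cleared bit in the BARE register enables the corresponding
	 * window, so the bit is reset to enable access and set to disable it.
	 */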
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.
 * It adds/removes MAC addresses from the port unicast address table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option      0 = Add, 1 = remove address.
 *
 * RETURN: 1 on success, 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;
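	/*
	 * Each 32-bit DFUT register holds four byte-wide entries; bit 0 of
	 * an entry is the "pass frame" bit and bits 3:1 select the Rx queue
	 * the frame is delivered to.
	 */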

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port unicast address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 *p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out an Rx descriptor ring and buffers in memory.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
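	/*
	 * Every descriptor is handed to the DMA engine up front and chained
	 * to the next one; the last descriptor points back to the first so
	 * the ring is circular.
	 */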
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) &&  \
	!defined(CONFIG_PHYLIB) &&			 \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set the ethernet MTU for the leaky bucket mechanism to 0; this
	 * disables the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder windows */
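	/* Setting all six window bits in BARE disables every address window */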
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Make sure the port is not held in reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set the MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Programs net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as the zeroth TXUQ descriptor */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
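	/*
	 * The DMA engine hands the descriptor back by clearing
	 * MVGBE_BUFFER_OWNED_BY_DMA in cmd_sts, so poll until ownership
	 * returns to the CPU.
	 */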
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until an rx packet is available or a timeout occurs */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If a packet was received without the first/last descriptor bits
	 * set, or the error summary bit is set, the packet needs to be
	 * dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		/* let the upper layer handle the packet */
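		/*
		 * The frame data starts RX_BUF_OFFSET bytes into the buffer,
		 * so skip that offset and shorten the length accordingly.
		 */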
		net_process_received_packet((p_rxdesc_curr->buf_ptr +
					     RX_BUF_OFFSET),
					    (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET));
	}
	/*
	 * give the descriptor back to the DMA engine and advance to the
	 * next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}

#if defined(CONFIG_PHYLIB)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	strcpy(bus->name, dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	/* Set phy address of the port */
	smi_reg_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);

	phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return 0;
}
#endif

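/*
 * mvgbe_initialize() is typically called from the board or SoC ethernet
 * init hook; a minimal sketch, assuming a Kirkwood-style cpu_eth_init():
 *
 *	int cpu_eth_init(bd_t *bis)
 *	{
 *		mvgbe_initialize(bis);
 *		return 0;
 *	}
 */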
int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if the port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

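		/*
		 * The error labels below live inside this if-block so that a
		 * failed allocation falls through the matching free() calls
		 * and releases everything allocated before it.
		 */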
		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

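		/*
		 * The handlers are registered through void pointer casts
		 * because some of their prototypes (e.g. init without the
		 * bd_t argument) differ from the legacy eth_device callback
		 * signatures.
		 */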
		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		int retval;
		struct mii_dev *mdiodev = mdio_alloc();
		if (!mdiodev)
			return -ENOMEM;
		strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
		mdiodev->read = smi_reg_read;
		mdiodev->write = smi_reg_write;

		retval = mdio_register(mdiodev);
		if (retval < 0)
			return retval;
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}