xref: /openbmc/u-boot/drivers/net/mvgbe.c (revision e895a4b0)
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#elif defined(CONFIG_DOVE)
#include <asm/arch/dove.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

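/*
 * 0xee is neither a valid 5-bit PHY address nor a valid register offset.
 * The SMI callbacks below use it as a pseudo address: when both the address
 * and the register arguments equal MV_PHY_ADR_REQUEST, the access is
 * redirected to the port's PHY address register instead of going out on the
 * MDIO bus.  Note that all ports share the SMI register of the first
 * controller (MVGBE0_BASE).
 */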
#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns 0 on success and stores the 16-bit register value in *data,
 * or a negative error code on failure.
 */
static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 *data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the port's PHY address instead of a PHY register */
		*data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return 0;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait until the SMI is no longer busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill in the phy address, register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait until the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	*data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr,
		reg_ofs, *data);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL on bad parameters or
 * -ETIME on timeout.
 */
static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EINVAL;
	}

	/* wait until the SMI is no longer busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill in the phy address, register offset, write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
#endif

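/*
 * Thin adapters between phylib's mii_dev read/write interface and the
 * name-based SMI helpers above.  The Clause 45 device address (dev_addr)
 * is not used by this controller and is ignored.
 */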
#if defined(CONFIG_PHYLIB)
int mvgbe_phy_read(struct mii_dev *bus, int phy_addr, int dev_addr,
		   int reg_addr)
{
	u16 data;
	int ret;

	ret = smi_reg_read(bus->name, phy_addr, reg_addr, &data);
	if (ret)
		return ret;
	return data;
}

int mvgbe_phy_write(struct mii_dev *bus, int phy_addr, int dev_addr,
		    int reg_addr, u16 data)
{
	return smi_reg_write(bus->name, phy_addr, reg_addr, data);
}
#endif

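/*
 * The Tx/Rx queue command registers (tqc/rqc) hold the per-queue enable
 * bits in bits [7:0]; writing a 1 to the corresponding bit in [15:8]
 * requests that the queue be disabled.  stop_queue() relies on this layout.
 */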
/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Re-read the queue command register until
			 * all queues report stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER): a cleared bit enables the window */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

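/*
 * set_dram_access - Map each DRAM bank into an Ethernet address window
 *
 * Walks the DRAM banks reported in gd->bd->bi_dram[] and opens one address
 * decode window per bank with full access, so that the controller's DMA can
 * reach the receive and transmit buffers wherever they were allocated.
 */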
static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for the address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.  It adds and removes MAC addresses from the port unicast
 * address table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 if the operation succeeded, 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry; leave the other entries in this
		 * register untouched.
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port Unicast address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 *p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames with this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out the Rx descriptor ring and buffers.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptor ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1)) {
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		} else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

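/*
 * mvgbe_init - Bring up one port
 *
 * Sets up the Rx descriptor ring, clears and unmasks the port interrupts,
 * programs the DRAM address windows and MAC filter tables, applies the
 * port/SDMA configuration and finally enables the receive queue.  When
 * built with the legacy MII interface and CONFIG_SYS_FAULT_ECHO_LINK_DOWN,
 * it also waits up to 5 seconds for the PHY to report link.
 */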
static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) &&  \
	!defined(CONFIG_PHYLIB) &&			 \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

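/*
 * mvgbe_halt - Stop the port
 *
 * Closes all address decode windows, stops the Tx and Rx queues, disables
 * the serial port and masks all of the port interrupts.
 */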
static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder windows */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Mark the port as not under reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Program the net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

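/*
 * mvgbe_send - Transmit one frame
 *
 * Frames whose buffer is not 8-byte aligned are first copied into a bounce
 * buffer.  A single Tx descriptor is built, handed to the controller as the
 * head of TXUQ, and the Tx queue is kicked; the function then busy-waits
 * until the DMA engine releases the descriptor or reports an error.
 */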
static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as the head of the zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return failure if an error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

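/*
 * mvgbe_recv - Poll for one received frame
 *
 * Spins on the current Rx descriptor until the DMA engine hands it back,
 * drops frames that are fragmented across descriptors or carry the error
 * summary bit, passes good frames to the network stack via NetReceive(),
 * and then returns the descriptor to the hardware and advances the ring.
 */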
static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until an rx packet is available or we time out */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If we received a packet without the first/last bits set,
	 * or with the error summary bit set, the packet needs to
	 * be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending received packet to"
			" upper layer (NetReceive)\n", __func__);

		/* let the upper layer handle the packet */
		NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
			(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
	}
	/*
	 * free this descriptor and advance to the next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}

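/*
 * mvgbe_phylib_init - Register an MDIO bus for the port and start its PHY
 *
 * Allocates and registers a phylib MDIO bus backed by the SMI helpers,
 * records the PHY address in the port's PHY address register, and then
 * connects, configures and starts the PHY at the given address.
 */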
#if defined(CONFIG_PHYLIB)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = mvgbe_phy_read;
	bus->write = mvgbe_phy_write;
	sprintf(bus->name, "%s", dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	/* Set the phy address of the port */
	mvgbe_phy_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);

	phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return 0;
}
#endif

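/*
 * mvgbe_initialize - Probe and register all enabled GbE ports
 *
 * For each port enabled in CONFIG_MVGBE_PORTS this allocates the driver
 * state together with the Rx descriptor ring, Rx buffers, an aligned Tx
 * bounce buffer and a Tx descriptor, registers the port as "egigaN" with
 * the network core, and hooks up PHY access either through phylib or the
 * legacy miiphy interface.
 */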
int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if the port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, smi_reg_read, smi_reg_write);
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}