xref: /openbmc/u-boot/drivers/net/fec_mxc.c (revision d9bef0ad)
1 /*
2  * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
3  * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
4  * (C) Copyright 2008 Armadeus Systems nc
5  * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
6  * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
7  *
8  * SPDX-License-Identifier:	GPL-2.0+
9  */
10 
11 #include <common.h>
12 #include <malloc.h>
13 #include <net.h>
14 #include <miiphy.h>
15 #include "fec_mxc.h"
16 
17 #include <asm/arch/clock.h>
18 #include <asm/arch/imx-regs.h>
19 #include <asm/io.h>
20 #include <asm/errno.h>
21 #include <linux/compiler.h>
22 
23 DECLARE_GLOBAL_DATA_PTR;
24 
25 /*
26  * Timeout the transfer after 5 ms. In practice it is usually a bit longer,
27  * since the code in the tight loops that use this timeout adds some overhead.
28  */
29 #define FEC_XFER_TIMEOUT	5000
30 
31 #ifndef CONFIG_MII
32 #error "CONFIG_MII has to be defined!"
33 #endif
34 
35 #ifndef CONFIG_FEC_XCV_TYPE
36 #define CONFIG_FEC_XCV_TYPE MII100
37 #endif
38 
39 /*
40  * The i.MX28 handles packets in big-endian byte order. We need to swap them
41  * before sending and after receiving.
42  */
43 #ifdef CONFIG_MX28
44 #define CONFIG_FEC_MXC_SWAP_PACKET
45 #endif
46 
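/*
 * Number of RX buffer descriptors that share one cache line. fec_recv() cleans
 * receive descriptors one whole cache line at a time, so that marking a single
 * descriptor empty never writes back a line the DMA engine may still be
 * updating (see the warning comment in fec_recv()).
 */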
47 #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
48 
49 /* Check various alignment issues at compile time */
50 #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
51 #error "ARCH_DMA_MINALIGN must be multiple of 16!"
52 #endif
53 
54 #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
55 	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
56 #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
57 #endif
58 
59 #undef DEBUG
60 
61 struct nbuf {
62 	uint8_t data[1500];	/**< actual data */
63 	int length;		/**< actual length */
64 	int used;		/**< buffer in use or not */
65 	uint8_t head[16];	/**< MAC header(6 + 6 + 2) + 2(aligned) */
66 };
67 
68 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
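/*
 * Byte-swap the packet buffer in place, one 32-bit word at a time; the length
 * is rounded up to a multiple of 4 bytes.
 */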
69 static void swap_packet(uint32_t *packet, int length)
70 {
71 	int i;
72 
73 	for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
74 		packet[i] = __swab32(packet[i]);
75 }
76 #endif
77 
78 /*
79  * MII-interface related functions
80  */
81 static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
82 		uint8_t regAddr)
83 {
84 	uint32_t reg;		/* convenient holder for the PHY register */
85 	uint32_t phy;		/* convenient holder for the PHY */
86 	uint32_t start;
87 	int val;
88 
89 	/*
90 	 * reading from any PHY's register is done by properly
91 	 * programming the FEC's MII data register.
92 	 */
93 	writel(FEC_IEVENT_MII, &eth->ievent);
94 	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
95 	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
96 
97 	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
98 			phy | reg, &eth->mii_data);
99 
100 	/*
101 	 * wait for the related interrupt
102 	 */
103 	start = get_timer(0);
104 	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
105 		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
106 			printf("Read MDIO failed...\n");
107 			return -1;
108 		}
109 	}
110 
111 	/*
112 	 * clear mii interrupt bit
113 	 */
114 	writel(FEC_IEVENT_MII, &eth->ievent);
115 
116 	/*
117 	 * it's now safe to read the PHY's register
118 	 */
119 	val = (unsigned short)readl(&eth->mii_data);
120 	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
121 			regAddr, val);
122 	return val;
123 }
124 
125 static void fec_mii_setspeed(struct ethernet_regs *eth)
126 {
127 	/*
128 	 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
129 	 * and do not drop the Preamble.
130 	 */
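	/*
	 * Worked example, assuming a 50 MHz FEC/IPG clock: (50 + 2) / 5 = 10,
	 * written as 10 << 1 since the MII_SPEED field starts at bit 1, which
	 * gives MDC = 50 MHz / (10 * 2) = 2.5 MHz, the usual MDIO clock limit.
	 */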
131 	writel((((imx_get_fecclk() / 1000000) + 2) / 5) << 1,
132 			&eth->mii_speed);
133 	debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
134 }
135 
136 static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
137 		uint8_t regAddr, uint16_t data)
138 {
139 	uint32_t reg;		/* convenient holder for the PHY register */
140 	uint32_t phy;		/* convenient holder for the PHY */
141 	uint32_t start;
142 
143 	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
144 	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
145 
146 	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
147 		FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);
148 
149 	/*
150 	 * wait for the MII interrupt
151 	 */
152 	start = get_timer(0);
153 	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
154 		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
155 			printf("Write MDIO failed...\n");
156 			return -1;
157 		}
158 	}
159 
160 	/*
161 	 * clear MII interrupt bit
162 	 */
163 	writel(FEC_IEVENT_MII, &eth->ievent);
164 	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
165 			regAddr, data);
166 
167 	return 0;
168 }
169 
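/*
 * miiphy accessors: thin wrappers matching the struct mii_dev read/write
 * callbacks; bus->priv holds the FEC register block (set in fec_get_miibus()).
 */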
170 int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr)
171 {
172 	return fec_mdio_read(bus->priv, phyAddr, regAddr);
173 }
174 
175 int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr,
176 		u16 data)
177 {
178 	return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
179 }
180 
181 #ifndef CONFIG_PHYLIB
182 static int miiphy_restart_aneg(struct eth_device *dev)
183 {
184 	int ret = 0;
185 #if !defined(CONFIG_FEC_MXC_NO_ANEG)
186 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
187 	struct ethernet_regs *eth = fec->bus->priv;
188 
189 	/*
190 	 * Wake up from sleep if necessary
191 	 * Reset PHY, then delay 300ns
192 	 */
193 #ifdef CONFIG_MX27
194 	fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
195 #endif
196 	fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
197 	udelay(1000);
198 
199 	/*
200 	 * Set the auto-negotiation advertisement register bits
201 	 */
202 	fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
203 			LPA_100FULL | LPA_100HALF | LPA_10FULL |
204 			LPA_10HALF | PHY_ANLPAR_PSB_802_3);
205 	fec_mdio_write(eth, fec->phy_id, MII_BMCR,
206 			BMCR_ANENABLE | BMCR_ANRESTART);
207 
208 	if (fec->mii_postcall)
209 		ret = fec->mii_postcall(fec->phy_id);
210 
211 #endif
212 	return ret;
213 }
214 
215 static int miiphy_wait_aneg(struct eth_device *dev)
216 {
217 	uint32_t start;
218 	int status;
219 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
220 	struct ethernet_regs *eth = fec->bus->priv;
221 
222 	/*
223 	 * Wait for AN completion
224 	 */
225 	start = get_timer(0);
226 	do {
227 		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
228 			printf("%s: Autonegotiation timeout\n", dev->name);
229 			return -1;
230 		}
231 
232 		status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
233 		if (status < 0) {
234 			printf("%s: Autonegotiation failed. status: %d\n",
235 					dev->name, status);
236 			return -1;
237 		}
238 	} while (!(status & BMSR_LSTATUS));
239 
240 	return 0;
241 }
242 #endif
243 
244 static int fec_rx_task_enable(struct fec_priv *fec)
245 {
246 	writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
247 	return 0;
248 }
249 
250 static int fec_rx_task_disable(struct fec_priv *fec)
251 {
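	/*
	 * Left as a no-op (as is fec_tx_task_disable() below): the DMA tasks
	 * stop when the controller is disabled via ECR[ETHER_EN] in fec_halt().
	 */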
252 	return 0;
253 }
254 
255 static int fec_tx_task_enable(struct fec_priv *fec)
256 {
257 	writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
258 	return 0;
259 }
260 
261 static int fec_tx_task_disable(struct fec_priv *fec)
262 {
263 	return 0;
264 }
265 
266 /**
267  * Initialize receive task's buffer descriptors
268  * @param[in] fec all we know about the device yet
269  * @param[in] count receive buffer count to be allocated
270  * @param[in] dsize desired size of each receive buffer
271  * @return 0 on success
272  *
273  * For this task we need additional memory for the data buffers, and each
274  * data buffer requires some alignment: they must each be aligned to a
275  * specific boundary.
276  */
277 static int fec_rbd_init(struct fec_priv *fec, int count, int dsize)
278 {
279 	uint32_t size;
280 	int i;
281 
282 	/*
283 	 * Allocate memory for the buffers. This allocation respects the
284 	 * alignment
285 	 */
286 	size = roundup(dsize, ARCH_DMA_MINALIGN);
287 	for (i = 0; i < count; i++) {
288 		uint32_t data_ptr = readl(&fec->rbd_base[i].data_pointer);
289 		if (data_ptr == 0) {
290 			uint8_t *data = memalign(ARCH_DMA_MINALIGN,
291 						 size);
292 			if (!data) {
293 				printf("%s: error allocating rxbuf %d\n",
294 				       __func__, i);
295 				goto err;
296 			}
297 			writel((uint32_t)data, &fec->rbd_base[i].data_pointer);
298 		} /* needs allocation */
299 		writew(FEC_RBD_EMPTY, &fec->rbd_base[i].status);
300 		writew(0, &fec->rbd_base[i].data_length);
301 	}
302 
303 	/* Mark the last RBD to close the ring. */
304 	writew(FEC_RBD_WRAP | FEC_RBD_EMPTY, &fec->rbd_base[i - 1].status);
305 	fec->rbd_index = 0;
306 
307 	return 0;
308 
309 err:
310 	for (; i >= 0; i--) {
311 		uint32_t data_ptr = readl(&fec->rbd_base[i].data_pointer);
312 		free((void *)data_ptr);
313 	}
314 
315 	return -ENOMEM;
316 }
317 
318 /**
319  * Initialize transmit task's buffer descriptors
320  * @param[in] fec all we know about the device yet
321  *
322  * Transmit buffers are created externally. We only have to init the BDs here.\n
323  * Note: There is a race condition in the hardware. When only one BD is in
324  * use it must be marked with the WRAP bit to use it for every transmit.
325  * This bit in combination with the READY bit results in a double transmit
326  * of each data buffer. It seems the state machine checks READY earlier than
327  * it resets it after the first transfer.
328  * Using two BDs solves this issue.
329  */
330 static void fec_tbd_init(struct fec_priv *fec)
331 {
332 	unsigned addr = (unsigned)fec->tbd_base;
333 	unsigned size = roundup(2 * sizeof(struct fec_bd),
334 				ARCH_DMA_MINALIGN);
335 	writew(0x0000, &fec->tbd_base[0].status);
336 	writew(FEC_TBD_WRAP, &fec->tbd_base[1].status);
337 	fec->tbd_index = 0;
338 	flush_dcache_range(addr, addr+size);
339 }
340 
341 /**
342  * Mark the given read buffer descriptor as free
343  * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
344  * @param[in] pRbd buffer descriptor to mark free again
345  */
346 static void fec_rbd_clean(int last, struct fec_bd *pRbd)
347 {
348 	unsigned short flags = FEC_RBD_EMPTY;
349 	if (last)
350 		flags |= FEC_RBD_WRAP;
351 	writew(flags, &pRbd->status);
352 	writew(0, &pRbd->data_length);
353 }
354 
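/*
 * Read the MAC address for this controller from the SoC fuses; returns 0 when
 * the fused address is a valid unicast address.
 */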
355 static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
356 						unsigned char *mac)
357 {
358 	imx_get_mac_from_fuse(dev_id, mac);
359 	return !is_valid_ether_addr(mac);
360 }
361 
362 static int fec_set_hwaddr(struct eth_device *dev)
363 {
364 	uchar *mac = dev->enetaddr;
365 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
366 
367 	writel(0, &fec->eth->iaddr1);
368 	writel(0, &fec->eth->iaddr2);
369 	writel(0, &fec->eth->gaddr1);
370 	writel(0, &fec->eth->gaddr2);
371 
372 	/*
373 	 * Set physical address
374 	 */
375 	writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
376 			&fec->eth->paddr1);
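	/* The low 16 bits of PADDR2 hold the pause-frame type field, 0x8808. */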
377 	writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);
378 
379 	return 0;
380 }
381 
382 /*
383  * Do initial configuration of the FEC registers
384  */
385 static void fec_reg_setup(struct fec_priv *fec)
386 {
387 	uint32_t rcntrl;
388 
389 	/*
390 	 * Set interrupt mask register
391 	 */
392 	writel(0x00000000, &fec->eth->imask);
393 
394 	/*
395 	 * Clear FEC-Lite interrupt event register(IEVENT)
396 	 */
397 	writel(0xffffffff, &fec->eth->ievent);
398 
399 
400 	/*
401 	 * Set FEC-Lite receive control register(R_CNTRL):
402 	 */
403 
404 	/* Start with frame length = 1518, common for all modes. */
405 	rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
406 	if (fec->xcv_type != SEVENWIRE)		/* xMII modes */
407 		rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
408 	if (fec->xcv_type == RGMII)
409 		rcntrl |= FEC_RCNTRL_RGMII;
410 	else if (fec->xcv_type == RMII)
411 		rcntrl |= FEC_RCNTRL_RMII;
412 
413 	writel(rcntrl, &fec->eth->r_cntrl);
414 }
415 
416 /**
417  * Start the FEC engine
418  * @param[in] dev Our device to handle
419  */
420 static int fec_open(struct eth_device *edev)
421 {
422 	struct fec_priv *fec = (struct fec_priv *)edev->priv;
423 	int speed;
424 	uint32_t addr, size;
425 	int i;
426 
427 	debug("fec_open: fec_open(dev)\n");
428 	/* full-duplex, heartbeat disabled */
429 	writel(1 << 2, &fec->eth->x_cntrl);
430 	fec->rbd_index = 0;
431 
432 	/* Invalidate all descriptors */
433 	for (i = 0; i < FEC_RBD_NUM - 1; i++)
434 		fec_rbd_clean(0, &fec->rbd_base[i]);
435 	fec_rbd_clean(1, &fec->rbd_base[i]);
436 
437 	/* Flush the descriptors into RAM */
438 	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
439 			ARCH_DMA_MINALIGN);
440 	addr = (uint32_t)fec->rbd_base;
441 	flush_dcache_range(addr, addr + size);
442 
443 #ifdef FEC_QUIRK_ENET_MAC
444 	/* Enable ENET HW endian SWAP */
445 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
446 		&fec->eth->ecntrl);
447 	/* Enable ENET store and forward mode */
448 	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
449 		&fec->eth->x_wmrk);
450 #endif
451 	/*
452 	 * Enable FEC-Lite controller
453 	 */
454 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
455 		&fec->eth->ecntrl);
456 #if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
457 	udelay(100);
458 	/*
459 	 * setup the MII gasket for RMII mode
460 	 */
461 
462 	/* disable the gasket */
463 	writew(0, &fec->eth->miigsk_enr);
464 
465 	/* wait for the gasket to be disabled */
466 	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
467 		udelay(2);
468 
469 	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
470 	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);
471 
472 	/* re-enable the gasket */
473 	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);
474 
475 	/* wait until MII gasket is ready */
476 	int max_loops = 10;
477 	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
478 		if (--max_loops <= 0) {
479 			printf("WAIT for MII Gasket ready timed out\n");
480 			break;
481 		}
482 	}
483 #endif
484 
485 #ifdef CONFIG_PHYLIB
486 	{
487 		/* Start up the PHY */
488 		int ret = phy_startup(fec->phydev);
489 
490 		if (ret) {
491 			printf("Could not initialize PHY %s\n",
492 			       fec->phydev->dev->name);
493 			return ret;
494 		}
495 		speed = fec->phydev->speed;
496 	}
497 #else
498 	miiphy_wait_aneg(edev);
499 	speed = miiphy_speed(edev->name, fec->phy_id);
500 	miiphy_duplex(edev->name, fec->phy_id);
501 #endif
502 
503 #ifdef FEC_QUIRK_ENET_MAC
504 	{
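		/*
		 * On the ENET variant the link speed is selected via two bits:
		 * FEC_ECNTRL_SPEED enables 1000 Mbit/s and FEC_RCNTRL_RMII_10T
		 * selects 10 Mbit/s; with both bits clear the MAC runs at
		 * 100 Mbit/s.
		 */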
505 		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
506 		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
507 		if (speed == _1000BASET)
508 			ecr |= FEC_ECNTRL_SPEED;
509 		else if (speed != _100BASET)
510 			rcr |= FEC_RCNTRL_RMII_10T;
511 		writel(ecr, &fec->eth->ecntrl);
512 		writel(rcr, &fec->eth->r_cntrl);
513 	}
514 #endif
515 	debug("%s:Speed=%i\n", __func__, speed);
516 
517 	/*
518 	 * Enable SmartDMA receive task
519 	 */
520 	fec_rx_task_enable(fec);
521 
522 	udelay(100000);
523 	return 0;
524 }
525 
526 static int fec_init(struct eth_device *dev, bd_t* bd)
527 {
528 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
529 	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
530 	uint32_t size;
531 	int i, ret;
532 
533 	/* Initialize MAC address */
534 	fec_set_hwaddr(dev);
535 
536 	/*
537 	 * Allocate transmit descriptors; there are two in total. This
538 	 * allocation respects cache alignment.
539 	 */
540 	if (!fec->tbd_base) {
541 		size = roundup(2 * sizeof(struct fec_bd),
542 				ARCH_DMA_MINALIGN);
543 		fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
544 		if (!fec->tbd_base) {
545 			ret = -ENOMEM;
546 			goto err1;
547 		}
548 		memset(fec->tbd_base, 0, size);
549 		fec_tbd_init(fec);
550 	}
551 
552 	/*
553 	 * Allocate receive descriptors. This allocation respects cache
554 	 * alignment.
555 	 */
556 	if (!fec->rbd_base) {
557 		size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
558 				ARCH_DMA_MINALIGN);
559 		fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
560 		if (!fec->rbd_base) {
561 			ret = -ENOMEM;
562 			goto err2;
563 		}
564 		memset(fec->rbd_base, 0, size);
565 		/*
566 		 * Initialize RxBD ring
567 		 */
568 		if (fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE) < 0) {
569 			ret = -ENOMEM;
570 			goto err3;
571 		}
572 		flush_dcache_range((unsigned)fec->rbd_base,
573 				   (unsigned)fec->rbd_base + size);
574 	}
575 
576 	fec_reg_setup(fec);
577 
578 	if (fec->xcv_type != SEVENWIRE)
579 		fec_mii_setspeed(fec->bus->priv);
580 
581 	/*
582 	 * Set Opcode/Pause Duration Register
583 	 */
584 	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
585 	writel(0x2, &fec->eth->x_wmrk);
586 	/*
587 	 * Set multicast address filter
588 	 */
589 	writel(0x00000000, &fec->eth->gaddr1);
590 	writel(0x00000000, &fec->eth->gaddr2);
591 
592 
593 	/* clear MIB RAM */
594 	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
595 		writel(0, i);
596 
597 	/* FIFO receive start register */
598 	writel(0x520, &fec->eth->r_fstart);
599 
600 	/* size and address of each buffer */
601 	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
602 	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
603 	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);
604 
605 #ifndef CONFIG_PHYLIB
606 	if (fec->xcv_type != SEVENWIRE)
607 		miiphy_restart_aneg(dev);
608 #endif
609 	fec_open(dev);
610 	return 0;
611 
612 err3:
613 	free(fec->rbd_base);
614 err2:
615 	free(fec->tbd_base);
616 err1:
617 	return ret;
618 }
619 
620 /**
621  * Halt the FEC engine
622  * @param[in] dev Our device to handle
623  */
624 static void fec_halt(struct eth_device *dev)
625 {
626 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
627 	int counter = 0xffff;
628 
629 	/*
630 	 * issue graceful stop command to the FEC transmitter if necessary
631 	 */
632 	writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
633 			&fec->eth->x_cntrl);
634 
635 	debug("eth_halt: wait for stop regs\n");
636 	/*
637 	 * wait for graceful stop to register
638 	 */
639 	while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
640 		udelay(1);
641 
642 	/*
643 	 * Disable SmartDMA tasks
644 	 */
645 	fec_tx_task_disable(fec);
646 	fec_rx_task_disable(fec);
647 
648 	/*
649 	 * Disable the Ethernet Controller
650 	 * Note: this will also reset the BD index counter!
651 	 */
652 	writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
653 			&fec->eth->ecntrl);
654 	fec->rbd_index = 0;
655 	fec->tbd_index = 0;
656 	debug("eth_halt: done\n");
657 }
658 
659 /**
660  * Transmit one frame
661  * @param[in] dev Our ethernet device to handle
662  * @param[in] packet Pointer to the data to be transmitted
663  * @param[in] length Data count in bytes
664  * @return 0 on success
665  */
666 static int fec_send(struct eth_device *dev, void *packet, int length)
667 {
668 	unsigned int status;
669 	uint32_t size, end;
670 	uint32_t addr;
671 	int timeout = FEC_XFER_TIMEOUT;
672 	int ret = 0;
673 
674 	/*
675 	 * This routine transmits one frame; it only accepts
676 	 * 6-byte Ethernet addresses.
677 	 */
678 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
679 
680 	/*
681 	 * Check for valid length of data.
682 	 */
683 	if ((length > 1500) || (length <= 0)) {
684 		printf("Invalid payload length: %d\n", length);
685 		return -1;
686 	}
687 
688 	/*
689 	 * Set up the transmit buffer. The two transmit descriptors are used
690 	 * alternately (tbd_index is toggled at the end of this function). We
691 	 * also flush the packet to RAM here to avoid cache trouble.
692 	 */
693 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
694 	swap_packet((uint32_t *)packet, length);
695 #endif
696 
697 	addr = (uint32_t)packet;
698 	end = roundup(addr + length, ARCH_DMA_MINALIGN);
699 	addr &= ~(ARCH_DMA_MINALIGN - 1);
700 	flush_dcache_range(addr, end);
701 
702 	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
703 	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);
704 
705 	/*
706 	 * update BD's status now
707 	 * This block:
708 	 * - is always the last in a chain (means no chain)
709 	 * - should transmit the CRC
710 	 * - might be the last BD in the list, so the address counter should
711 	 *   wrap (-> keep the WRAP flag)
712 	 */
713 	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
714 	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
715 	writew(status, &fec->tbd_base[fec->tbd_index].status);
716 
717 	/*
718 	 * Flush data cache. This code flushes both TX descriptors to RAM.
719 	 * After this code, the descriptors will be safely in RAM and we
720 	 * can start DMA.
721 	 */
722 	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
723 	addr = (uint32_t)fec->tbd_base;
724 	flush_dcache_range(addr, addr + size);
725 
726 	/*
727 	 * Below we read the DMA descriptor's last four bytes back from the
728 	 * DRAM. This is important in order to make sure that all WRITE
729 	 * operations on the bus that were triggered by previous cache FLUSH
730 	 * have completed.
731 	 *
732 	 * Otherwise, on MX28, it is possible to observe a corruption of the
733 	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
734 	 * for the bus structure of MX28. The scenario is as follows:
735 	 *
736 	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
737 	 *    to DRAM due to flush_dcache_range()
738 	 * 2) ARM core writes the FEC registers via AHB_ARB2
739 	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
740 	 *
741 	 * Note that 2) does sometimes finish before 1) due to reordering of
742 	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
743 	 * DMA descriptor is fully written into DRAM. This results in occasional
744 	 * corruption of the DMA descriptor.
745 	 */
746 	readl(addr + size - 4);
747 
748 	/*
749 	 * Enable SmartDMA transmit task
750 	 */
751 	fec_tx_task_enable(fec);
752 
753 	 * Wait until the frame is sent: poll the transmit descriptor active
754 	 * register until the controller clears TDAR, then invalidate the data
755 	 * cache over the descriptors to see what is really in RAM.
756 	 * barrier here.
757 	 */
758 	while (--timeout) {
759 		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
760 			break;
761 	}
762 
763 	if (!timeout)
764 		ret = -EINVAL;
765 
766 	invalidate_dcache_range(addr, addr + size);
767 	if (readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_READY)
768 		ret = -EINVAL;
769 
770 	debug("fec_send: status 0x%x index %d ret %i\n",
771 			readw(&fec->tbd_base[fec->tbd_index].status),
772 			fec->tbd_index, ret);
773 	/* for next transmission use the other buffer */
774 	if (fec->tbd_index)
775 		fec->tbd_index = 0;
776 	else
777 		fec->tbd_index = 1;
778 
779 	return ret;
780 }
781 
782 /**
783  * Pull one frame from the card
784  * @param[in] dev Our ethernet device to handle
785  * @return Length of packet read
786  */
787 static int fec_recv(struct eth_device *dev)
788 {
789 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
790 	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
791 	unsigned long ievent;
792 	int frame_length, len = 0;
793 	struct nbuf *frame;
794 	uint16_t bd_status;
795 	uint32_t addr, size, end;
796 	int i;
797 	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);
798 
799 	/*
800 	 * Check if any critical events have happened
801 	 */
802 	ievent = readl(&fec->eth->ievent);
803 	writel(ievent, &fec->eth->ievent);
804 	debug("fec_recv: ievent 0x%lx\n", ievent);
805 	if (ievent & FEC_IEVENT_BABR) {
806 		fec_halt(dev);
807 		fec_init(dev, fec->bd);
808 		printf("some error: 0x%08lx\n", ievent);
809 		return 0;
810 	}
811 	if (ievent & FEC_IEVENT_HBERR) {
812 		/* Heartbeat error */
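		/*
		 * The literal 0x00000001 here (and in the FEC_IEVENT_GRA branch
		 * below) appears to be FEC_TCNTRL_GTS, i.e. a graceful transmit
		 * stop request.
		 */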
813 		writel(0x00000001 | readl(&fec->eth->x_cntrl),
814 				&fec->eth->x_cntrl);
815 	}
816 	if (ievent & FEC_IEVENT_GRA) {
817 		/* Graceful stop complete */
818 		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
819 			fec_halt(dev);
820 			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
821 					&fec->eth->x_cntrl);
822 			fec_init(dev, fec->bd);
823 		}
824 	}
825 
826 	/*
827 	 * Read the buffer status. Before the status can be read, the data cache
828 	 * must be invalidated, because the data in RAM might have been changed
829 	 * by DMA. The descriptors are properly aligned to cachelines so there's
830 	 * no need to worry they'd overlap.
831 	 *
832 	 * WARNING: By invalidating the descriptor here, we also invalidate
833 	 * the descriptors surrounding this one. Therefore we can NOT change the
834 	 * contents of this descriptor nor the surrounding ones. The problem is
835 	 * that in order to mark the descriptor as processed, we need to change
836 	 * the descriptor. The solution is to mark the whole cache line when all
837 	 * descriptors in the cache line are processed.
838 	 */
839 	addr = (uint32_t)rbd;
840 	addr &= ~(ARCH_DMA_MINALIGN - 1);
841 	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
842 	invalidate_dcache_range(addr, addr + size);
843 
844 	bd_status = readw(&rbd->status);
845 	debug("fec_recv: status 0x%x\n", bd_status);
846 
847 	if (!(bd_status & FEC_RBD_EMPTY)) {
848 		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
849 			((readw(&rbd->data_length) - 4) > 14)) {
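			/*
			 * data_length includes the 4-byte FCS; frames shorter
			 * than a full 14-byte Ethernet header (after stripping
			 * the FCS) are ignored.
			 */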
850 			/*
851 			 * Get buffer address and size
852 			 */
853 			frame = (struct nbuf *)readl(&rbd->data_pointer);
854 			frame_length = readw(&rbd->data_length) - 4;
855 			/*
856 			 * Invalidate data cache over the buffer
857 			 */
858 			addr = (uint32_t)frame;
859 			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
860 			addr &= ~(ARCH_DMA_MINALIGN - 1);
861 			invalidate_dcache_range(addr, end);
862 
863 			/*
864 			 *  Fill the buffer and pass it to upper layers
865 			 */
866 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
867 			swap_packet((uint32_t *)frame->data, frame_length);
868 #endif
869 			memcpy(buff, frame->data, frame_length);
870 			NetReceive(buff, frame_length);
871 			len = frame_length;
872 		} else {
873 			if (bd_status & FEC_RBD_ERR)
874 				printf("error frame: 0x%08lx 0x%08x\n",
875 						(ulong)rbd->data_pointer,
876 						bd_status);
877 		}
878 
879 		/*
880 		 * to the next buffer. Here we check if the whole cache line of
881 		 * descriptors was already processed and if so, we mark it free
882 		 * as a whole.
883 		 * as whole.
884 		 */
885 		size = RXDESC_PER_CACHELINE - 1;
886 		if ((fec->rbd_index & size) == size) {
887 			i = fec->rbd_index - size;
888 			addr = (uint32_t)&fec->rbd_base[i];
889 			for (; i <= fec->rbd_index ; i++) {
890 				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
891 					      &fec->rbd_base[i]);
892 			}
893 			flush_dcache_range(addr,
894 				addr + ARCH_DMA_MINALIGN);
895 		}
896 
897 		fec_rx_task_enable(fec);
898 		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
899 	}
900 	debug("fec_recv: stop\n");
901 
902 	return len;
903 }
904 
905 static void fec_set_dev_name(char *dest, int dev_id)
906 {
907 	sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
908 }
909 
910 #ifdef CONFIG_PHYLIB
911 int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
912 		struct mii_dev *bus, struct phy_device *phydev)
913 #else
914 static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
915 		struct mii_dev *bus, int phy_id)
916 #endif
917 {
918 	struct eth_device *edev;
919 	struct fec_priv *fec;
920 	unsigned char ethaddr[6];
921 	uint32_t start;
922 	int ret = 0;
923 
924 	/* create and fill edev struct */
925 	edev = (struct eth_device *)malloc(sizeof(struct eth_device));
926 	if (!edev) {
927 		puts("fec_mxc: not enough malloc memory for eth_device\n");
928 		ret = -ENOMEM;
929 		goto err1;
930 	}
931 
932 	fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
933 	if (!fec) {
934 		puts("fec_mxc: not enough malloc memory for fec_priv\n");
935 		ret = -ENOMEM;
936 		goto err2;
937 	}
938 
939 	memset(edev, 0, sizeof(*edev));
940 	memset(fec, 0, sizeof(*fec));
941 
942 	edev->priv = fec;
943 	edev->init = fec_init;
944 	edev->send = fec_send;
945 	edev->recv = fec_recv;
946 	edev->halt = fec_halt;
947 	edev->write_hwaddr = fec_set_hwaddr;
948 
949 	fec->eth = (struct ethernet_regs *)base_addr;
950 	fec->bd = bd;
951 
952 	fec->xcv_type = CONFIG_FEC_XCV_TYPE;
953 
954 	/* Reset chip. */
955 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
956 	start = get_timer(0);
957 	while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
958 		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
959 			printf("FEC MXC: Timeout resetting chip\n");
			ret = -ETIMEDOUT;	/* don't return success after a failed reset */
960 			goto err3;
961 		}
962 		udelay(10);
963 	}
964 
965 	fec_reg_setup(fec);
966 	fec_set_dev_name(edev->name, dev_id);
967 	fec->dev_id = (dev_id == -1) ? 0 : dev_id;
968 	fec->bus = bus;
969 	fec_mii_setspeed(bus->priv);
970 #ifdef CONFIG_PHYLIB
971 	fec->phydev = phydev;
972 	phy_connect_dev(phydev, edev);
973 	/* Configure phy */
974 	phy_config(phydev);
975 #else
976 	fec->phy_id = phy_id;
977 #endif
978 	eth_register(edev);
979 
980 	if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
981 		debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
982 		memcpy(edev->enetaddr, ethaddr, 6);
983 		if (!getenv("ethaddr"))
984 			eth_setenv_enetaddr("ethaddr", ethaddr);
985 	}
986 	return ret;
987 err3:
988 	free(fec);
989 err2:
990 	free(edev);
991 err1:
992 	return ret;
993 }
994 
995 struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
996 {
997 	struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
998 	struct mii_dev *bus;
999 	int ret;
1000 
1001 	bus = mdio_alloc();
1002 	if (!bus) {
1003 		printf("mdio_alloc failed\n");
1004 		return NULL;
1005 	}
1006 	bus->read = fec_phy_read;
1007 	bus->write = fec_phy_write;
1008 	bus->priv = eth;
1009 	fec_set_dev_name(bus->name, dev_id);
1010 
1011 	ret = mdio_register(bus);
1012 	if (ret) {
1013 		printf("mdio_register failed\n");
1014 		free(bus);
1015 		return NULL;
1016 	}
1017 	fec_mii_setspeed(eth);
1018 	return bus;
1019 }
1020 
1021 int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
1022 {
1023 	uint32_t base_mii;
1024 	struct mii_dev *bus = NULL;
1025 #ifdef CONFIG_PHYLIB
1026 	struct phy_device *phydev = NULL;
1027 #endif
1028 	int ret;
1029 
1030 #ifdef CONFIG_MX28
1031 	/*
1032 	 * The i.MX28 has two ethernet interfaces, but they are not equal.
1033 	 * Only the first one can access the MDIO bus.
1034 	 */
1035 	base_mii = MXS_ENET0_BASE;
1036 #else
1037 	base_mii = addr;
1038 #endif
1039 	debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
1040 	bus = fec_get_miibus(base_mii, dev_id);
1041 	if (!bus)
1042 		return -ENOMEM;
1043 #ifdef CONFIG_PHYLIB
1044 	phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
1045 	if (!phydev) {
1046 		free(bus);
1047 		return -ENOMEM;
1048 	}
1049 	ret = fec_probe(bd, dev_id, addr, bus, phydev);
1050 #else
1051 	ret = fec_probe(bd, dev_id, addr, bus, phy_id);
1052 #endif
1053 	if (ret) {
1054 #ifdef CONFIG_PHYLIB
1055 		free(phydev);
1056 #endif
1057 		free(bus);
1058 	}
1059 	return ret;
1060 }
1061 
1062 #ifdef CONFIG_FEC_MXC_PHYADDR
1063 int fecmxc_initialize(bd_t *bd)
1064 {
1065 	return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
1066 			IMX_FEC_BASE);
1067 }
1068 #endif
1069 
1070 #ifndef CONFIG_PHYLIB
1071 int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
1072 {
1073 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
1074 	fec->mii_postcall = cb;
1075 	return 0;
1076 }
1077 #endif
1078