xref: /openbmc/u-boot/drivers/net/fec_mxc.c (revision 3dc23f78)
1 /*
2  * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
3  * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
4  * (C) Copyright 2008 Armadeus Systems nc
5  * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
6  * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
7  *
8  * SPDX-License-Identifier:	GPL-2.0+
9  */
10 
11 #include <common.h>
12 #include <malloc.h>
13 #include <net.h>
14 #include <miiphy.h>
15 #include "fec_mxc.h"
16 
17 #include <asm/arch/clock.h>
18 #include <asm/arch/imx-regs.h>
19 #include <asm/io.h>
20 #include <asm/errno.h>
21 #include <linux/compiler.h>
22 
23 DECLARE_GLOBAL_DATA_PTR;
24 
25 /*
26  * Time out the transfer after 5 ms. In practice it takes a bit longer, since
27  * the tight polling loops that use this timeout add some overhead.
28  */
29 #define FEC_XFER_TIMEOUT	5000
30 
31 /*
32  * The standard 32-byte DMA alignment does not work on mx6solox, which requires
33  * 64-byte alignment for the FEC DMA RX buffers.
34  * Introduce FEC_DMA_RX_MINALIGN, which covers the mx6solox requirement and also
35  * satisfies the 32-byte alignment needed on the other SoCs.
36  */
37 #define FEC_DMA_RX_MINALIGN	64
38 
39 #ifndef CONFIG_MII
40 #error "CONFIG_MII has to be defined!"
41 #endif
42 
43 #ifndef CONFIG_FEC_XCV_TYPE
44 #define CONFIG_FEC_XCV_TYPE MII100
45 #endif
46 
47 /*
48  * The i.MX28 operates on packets in big-endian byte order, so we need to swap
49  * them before sending and after receiving.
50  */
51 #ifdef CONFIG_MX28
52 #define CONFIG_FEC_MXC_SWAP_PACKET
53 #endif
54 
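/*
 * Number of RX buffer descriptors that share one cache line. fec_recv()
 * only marks descriptors free again once a whole cache line of them has
 * been processed, so it never has to modify a partially invalidated line.
 */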
55 #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
56 
57 /* Check various alignment issues at compile time */
58 #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
59 #error "ARCH_DMA_MINALIGN must be multiple of 16!"
60 #endif
61 
62 #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
63 	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
64 #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
65 #endif
66 
67 #undef DEBUG
68 
69 struct nbuf {
70 	uint8_t data[1500];	/**< actual data */
71 	int length;		/**< actual length */
72 	int used;		/**< buffer in use or not */
73 	uint8_t head[16];	/**< MAC header (6 + 6 + 2) + 2 bytes alignment padding */
74 };
75 
76 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
77 static void swap_packet(uint32_t *packet, int length)
78 {
79 	int i;
80 
81 	for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
82 		packet[i] = __swab32(packet[i]);
83 }
84 #endif
85 
86 /*
87  * MII-interface related functions
88  */
89 static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
90 		uint8_t regAddr)
91 {
92 	uint32_t reg;		/* convenient holder for the PHY register */
93 	uint32_t phy;		/* convenient holder for the PHY */
94 	uint32_t start;
95 	int val;
96 
97 	/*
98 	 * Reading a PHY register is done by programming the FEC's
99 	 * MII data register accordingly.
100 	 */
101 	writel(FEC_IEVENT_MII, &eth->ievent);
102 	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
103 	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
104 
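	/*
	 * Build the management frame: start-of-frame, read opcode, PHY and
	 * register addresses and the turnaround field; the PHY supplies the
	 * 16-bit data during the read cycle.
	 */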
105 	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
106 			phy | reg, &eth->mii_data);
107 
108 	/*
109 	 * wait for the related interrupt
110 	 */
111 	start = get_timer(0);
112 	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
113 		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
114 			printf("Read MDIO failed...\n");
115 			return -1;
116 		}
117 	}
118 
119 	/*
120 	 * clear mii interrupt bit
121 	 */
122 	writel(FEC_IEVENT_MII, &eth->ievent);
123 
124 	/*
125 	 * it's now safe to read the PHY's register
126 	 */
127 	val = (unsigned short)readl(&eth->mii_data);
128 	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
129 			regAddr, val);
130 	return val;
131 }
132 
133 static void fec_mii_setspeed(struct ethernet_regs *eth)
134 {
135 	/*
136 	 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
137 	 * and do not drop the Preamble.
138 	 */
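	/*
	 * A divisor of DIV_ROUND_UP(fecclk, 5 MHz) keeps the resulting MDC
	 * clock at or below 2.5 MHz, the maximum allowed on the MII
	 * management interface.
	 */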
139 	register u32 speed = DIV_ROUND_UP(imx_get_fecclk(), 5000000);
140 #ifdef FEC_QUIRK_ENET_MAC
141 	speed--;
142 #endif
143 	speed <<= 1;
144 	writel(speed, &eth->mii_speed);
145 	debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
146 }
147 
148 static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
149 		uint8_t regAddr, uint16_t data)
150 {
151 	uint32_t reg;		/* convenient holder for the PHY register */
152 	uint32_t phy;		/* convenient holder for the PHY */
153 	uint32_t start;
154 
155 	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
156 	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
157 
158 	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
159 		FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);
160 
161 	/*
162 	 * wait for the MII interrupt
163 	 */
164 	start = get_timer(0);
165 	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
166 		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
167 			printf("Write MDIO failed...\n");
168 			return -1;
169 		}
170 	}
171 
172 	/*
173 	 * clear MII interrupt bit
174 	 */
175 	writel(FEC_IEVENT_MII, &eth->ievent);
176 	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
177 			regAddr, data);
178 
179 	return 0;
180 }
181 
182 int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr)
183 {
184 	return fec_mdio_read(bus->priv, phyAddr, regAddr);
185 }
186 
187 int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr,
188 		u16 data)
189 {
190 	return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
191 }
192 
193 #ifndef CONFIG_PHYLIB
194 static int miiphy_restart_aneg(struct eth_device *dev)
195 {
196 	int ret = 0;
197 #if !defined(CONFIG_FEC_MXC_NO_ANEG)
198 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
199 	struct ethernet_regs *eth = fec->bus->priv;
200 
201 	/*
202 	 * Wake up from sleep if necessary
203 	 * Reset the PHY, then wait 1 ms for it to come out of reset
204 	 */
205 #ifdef CONFIG_MX27
206 	fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
207 #endif
208 	fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
209 	udelay(1000);
210 
211 	/*
212 	 * Set the auto-negotiation advertisement register bits
213 	 */
214 	fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
215 			LPA_100FULL | LPA_100HALF | LPA_10FULL |
216 			LPA_10HALF | PHY_ANLPAR_PSB_802_3);
217 	fec_mdio_write(eth, fec->phy_id, MII_BMCR,
218 			BMCR_ANENABLE | BMCR_ANRESTART);
219 
220 	if (fec->mii_postcall)
221 		ret = fec->mii_postcall(fec->phy_id);
222 
223 #endif
224 	return ret;
225 }
226 
227 static int miiphy_wait_aneg(struct eth_device *dev)
228 {
229 	uint32_t start;
230 	int status;
231 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
232 	struct ethernet_regs *eth = fec->bus->priv;
233 
234 	/*
235 	 * Wait for AN completion
236 	 */
237 	start = get_timer(0);
238 	do {
239 		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
240 			printf("%s: Autonegotiation timeout\n", dev->name);
241 			return -1;
242 		}
243 
244 		status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
245 		if (status < 0) {
246 			printf("%s: Autonegotiation failed. status: %d\n",
247 					dev->name, status);
248 			return -1;
249 		}
250 	} while (!(status & BMSR_LSTATUS));
251 
252 	return 0;
253 }
254 #endif
255 
256 static int fec_rx_task_enable(struct fec_priv *fec)
257 {
258 	writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
259 	return 0;
260 }
261 
262 static int fec_rx_task_disable(struct fec_priv *fec)
263 {
264 	return 0;
265 }
266 
267 static int fec_tx_task_enable(struct fec_priv *fec)
268 {
269 	writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
270 	return 0;
271 }
272 
273 static int fec_tx_task_disable(struct fec_priv *fec)
274 {
275 	return 0;
276 }
277 
278 /**
279  * Initialize receive task's buffer descriptors
280  * @param[in] fec all we know about the device so far
281  * @param[in] count number of receive buffer descriptors to initialize
282  * @param[in] dsize size of each receive buffer
284  *
285  * Init all RX descriptors to default values.
286  */
287 static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
288 {
289 	uint32_t size;
290 	uint8_t *data;
291 	int i;
292 
293 	/*
294 	 * Reload the RX descriptors with default values and wipe
295 	 * the RX buffers.
296 	 */
297 	size = roundup(dsize, ARCH_DMA_MINALIGN);
298 	for (i = 0; i < count; i++) {
299 		data = (uint8_t *)fec->rbd_base[i].data_pointer;
300 		memset(data, 0, dsize);
301 		flush_dcache_range((uint32_t)data, (uint32_t)data + size);
302 
303 		fec->rbd_base[i].status = FEC_RBD_EMPTY;
304 		fec->rbd_base[i].data_length = 0;
305 	}
306 
307 	/* Mark the last RBD to close the ring. */
308 	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
309 	fec->rbd_index = 0;
310 
311 	flush_dcache_range((unsigned)fec->rbd_base,
312 			   (unsigned)fec->rbd_base + size);
313 }
314 
315 /**
316  * Initialize transmit task's buffer descriptors
317  * @param[in] fec all we know about the device yet
318  *
319  * Transmit buffers are created externally. We only have to init the BDs here.\n
320  * Note: There is a race condition in the hardware. When only one BD is in
321  * use it must be marked with the WRAP bit to reuse it for every transmit.
322  * This bit, in combination with the READY bit, results in a double transmit
323  * of each data buffer: the state machine seems to check READY again before
324  * it has reset the bit after the first transfer.
325  * Using two BDs avoids this issue.
326  */
327 static void fec_tbd_init(struct fec_priv *fec)
328 {
329 	unsigned addr = (unsigned)fec->tbd_base;
330 	unsigned size = roundup(2 * sizeof(struct fec_bd),
331 				ARCH_DMA_MINALIGN);
332 
333 	memset(fec->tbd_base, 0, size);
334 	fec->tbd_base[0].status = 0;
335 	fec->tbd_base[1].status = FEC_TBD_WRAP;
336 	fec->tbd_index = 0;
337 	flush_dcache_range(addr, addr + size);
338 }
339 
340 /**
341  * Mark the given receive buffer descriptor as free
342  * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
343  * @param[in] pRbd buffer descriptor to mark free again
344  */
345 static void fec_rbd_clean(int last, struct fec_bd *pRbd)
346 {
347 	unsigned short flags = FEC_RBD_EMPTY;
348 	if (last)
349 		flags |= FEC_RBD_WRAP;
350 	writew(flags, &pRbd->status);
351 	writew(0, &pRbd->data_length);
352 }
353 
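/* Read the MAC address from the SoC fuses; returns 0 if it is valid. */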
354 static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
355 						unsigned char *mac)
356 {
357 	imx_get_mac_from_fuse(dev_id, mac);
358 	return !is_valid_ether_addr(mac);
359 }
360 
361 static int fec_set_hwaddr(struct eth_device *dev)
362 {
363 	uchar *mac = dev->enetaddr;
364 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
365 
366 	writel(0, &fec->eth->iaddr1);
367 	writel(0, &fec->eth->iaddr2);
368 	writel(0, &fec->eth->gaddr1);
369 	writel(0, &fec->eth->gaddr2);
370 
371 	/*
372 	 * Set physical address
373 	 */
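	/*
	 * paddr1 holds the first four bytes of the MAC address; the upper
	 * half of paddr2 holds the remaining two bytes and its lower half is
	 * the fixed 0x8808 type field used in MAC control (pause) frames.
	 */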
374 	writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
375 			&fec->eth->paddr1);
376 	writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);
377 
378 	return 0;
379 }
380 
381 /*
382  * Do initial configuration of the FEC registers
383  */
384 static void fec_reg_setup(struct fec_priv *fec)
385 {
386 	uint32_t rcntrl;
387 
388 	/*
389 	 * Set interrupt mask register
390 	 */
391 	writel(0x00000000, &fec->eth->imask);
392 
393 	/*
394 	 * Clear FEC-Lite interrupt event register(IEVENT)
395 	 */
396 	writel(0xffffffff, &fec->eth->ievent);
397 
398 
399 	/*
400 	 * Set FEC-Lite receive control register(R_CNTRL):
401 	 */
402 
403 	/* Start with frame length = 1518, common for all modes. */
404 	rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
405 	if (fec->xcv_type != SEVENWIRE)		/* xMII modes */
406 		rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
407 	if (fec->xcv_type == RGMII)
408 		rcntrl |= FEC_RCNTRL_RGMII;
409 	else if (fec->xcv_type == RMII)
410 		rcntrl |= FEC_RCNTRL_RMII;
411 
412 	writel(rcntrl, &fec->eth->r_cntrl);
413 }
414 
415 /**
416  * Start the FEC engine
417  * @param[in] dev Our device to handle
418  */
419 static int fec_open(struct eth_device *edev)
420 {
421 	struct fec_priv *fec = (struct fec_priv *)edev->priv;
422 	int speed;
423 	uint32_t addr, size;
424 	int i;
425 
426 	debug("fec_open: fec_open(dev)\n");
427 	/* full-duplex, heartbeat disabled */
428 	writel(1 << 2, &fec->eth->x_cntrl);
429 	fec->rbd_index = 0;
430 
431 	/* Invalidate all descriptors */
432 	for (i = 0; i < FEC_RBD_NUM - 1; i++)
433 		fec_rbd_clean(0, &fec->rbd_base[i]);
434 	fec_rbd_clean(1, &fec->rbd_base[i]);
435 
436 	/* Flush the descriptors into RAM */
437 	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
438 			ARCH_DMA_MINALIGN);
439 	addr = (uint32_t)fec->rbd_base;
440 	flush_dcache_range(addr, addr + size);
441 
442 #ifdef FEC_QUIRK_ENET_MAC
443 	/* Enable ENET HW endian SWAP */
444 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
445 		&fec->eth->ecntrl);
446 	/* Enable ENET store and forward mode */
447 	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
448 		&fec->eth->x_wmrk);
449 #endif
450 	/*
451 	 * Enable FEC-Lite controller
452 	 */
453 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
454 		&fec->eth->ecntrl);
455 #if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
456 	udelay(100);
457 	/*
458 	 * setup the MII gasket for RMII mode
459 	 */
460 
461 	/* disable the gasket */
462 	writew(0, &fec->eth->miigsk_enr);
463 
464 	/* wait for the gasket to be disabled */
465 	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
466 		udelay(2);
467 
468 	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
469 	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);
470 
471 	/* re-enable the gasket */
472 	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);
473 
474 	/* wait until MII gasket is ready */
475 	int max_loops = 10;
476 	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
477 		if (--max_loops <= 0) {
478 			printf("WAIT for MII Gasket ready timed out\n");
479 			break;
480 		}
481 	}
482 #endif
483 
484 #ifdef CONFIG_PHYLIB
485 	{
486 		/* Start up the PHY */
487 		int ret = phy_startup(fec->phydev);
488 
489 		if (ret) {
490 			printf("Could not initialize PHY %s\n",
491 			       fec->phydev->dev->name);
492 			return ret;
493 		}
494 		speed = fec->phydev->speed;
495 	}
496 #else
497 	miiphy_wait_aneg(edev);
498 	speed = miiphy_speed(edev->name, fec->phy_id);
499 	miiphy_duplex(edev->name, fec->phy_id);
500 #endif
501 
502 #ifdef FEC_QUIRK_ENET_MAC
503 	{
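		/*
		 * On the ENET variant the ECR SPEED bit selects gigabit
		 * operation and the RCR RMII_10T bit selects 10 Mbit/s;
		 * leaving both clear keeps the default 100 Mbit/s mode.
		 */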
504 		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
505 		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
506 		if (speed == _1000BASET)
507 			ecr |= FEC_ECNTRL_SPEED;
508 		else if (speed != _100BASET)
509 			rcr |= FEC_RCNTRL_RMII_10T;
510 		writel(ecr, &fec->eth->ecntrl);
511 		writel(rcr, &fec->eth->r_cntrl);
512 	}
513 #endif
514 	debug("%s:Speed=%i\n", __func__, speed);
515 
516 	/*
517 	 * Enable SmartDMA receive task
518 	 */
519 	fec_rx_task_enable(fec);
520 
521 	udelay(100000);
522 	return 0;
523 }
524 
525 static int fec_init(struct eth_device *dev, bd_t* bd)
526 {
527 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
528 	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
529 	int i;
530 
531 	/* Initialize MAC address */
532 	fec_set_hwaddr(dev);
533 
534 	/*
535 	 * Setup transmit descriptors, there are two in total.
536 	 */
537 	fec_tbd_init(fec);
538 
539 	/* Setup receive descriptors. */
540 	fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);
541 
542 	fec_reg_setup(fec);
543 
544 	if (fec->xcv_type != SEVENWIRE)
545 		fec_mii_setspeed(fec->bus->priv);
546 
547 	/*
548 	 * Set Opcode/Pause Duration Register
549 	 */
550 	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
551 	writel(0x2, &fec->eth->x_wmrk);
552 	/*
553 	 * Set multicast address filter
554 	 */
555 	writel(0x00000000, &fec->eth->gaddr1);
556 	writel(0x00000000, &fec->eth->gaddr2);
557 
558 
559 	/* clear MIB RAM */
560 	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
561 		writel(0, i);
562 
563 	/* FIFO receive start register */
564 	writel(0x520, &fec->eth->r_fstart);
565 
566 	/* size and address of each buffer */
567 	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
568 	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
569 	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);
570 
571 #ifndef CONFIG_PHYLIB
572 	if (fec->xcv_type != SEVENWIRE)
573 		miiphy_restart_aneg(dev);
574 #endif
575 	fec_open(dev);
576 	return 0;
577 }
578 
579 /**
580  * Halt the FEC engine
581  * @param[in] dev Our device to handle
582  */
583 static void fec_halt(struct eth_device *dev)
584 {
585 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
586 	int counter = 0xffff;
587 
588 	/*
589 	 * issue graceful stop command to the FEC transmitter if necessary
590 	 */
591 	writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
592 			&fec->eth->x_cntrl);
593 
594 	debug("eth_halt: wait for stop regs\n");
595 	/*
596 	 * wait for graceful stop to register
597 	 */
598 	while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
599 		udelay(1);
600 
601 	/*
602 	 * Disable SmartDMA tasks
603 	 */
604 	fec_tx_task_disable(fec);
605 	fec_rx_task_disable(fec);
606 
607 	/*
608 	 * Disable the Ethernet Controller
609 	 * Note: this will also reset the BD index counter!
610 	 */
611 	writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
612 			&fec->eth->ecntrl);
613 	fec->rbd_index = 0;
614 	fec->tbd_index = 0;
615 	debug("eth_halt: done\n");
616 }
617 
618 /**
619  * Transmit one frame
620  * @param[in] dev Our ethernet device to handle
621  * @param[in] packet Pointer to the data to be transmitted
622  * @param[in] length Data count in bytes
623  * @return 0 on success
624  */
625 static int fec_send(struct eth_device *dev, void *packet, int length)
626 {
627 	unsigned int status;
628 	uint32_t size, end;
629 	uint32_t addr;
630 	int timeout = FEC_XFER_TIMEOUT;
631 	int ret = 0;
632 
633 	/*
634 	 * This routine transmits a single frame. The caller provides a complete
635 	 * Ethernet frame with the standard 6-byte MAC addresses.
636 	 */
637 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
638 
639 	/*
640 	 * Check for valid length of data.
641 	 */
642 	if ((length > 1500) || (length <= 0)) {
643 		printf("Payload (%d) out of range\n", length);
644 		return -1;
645 	}
646 
647 	/*
648 	 * Setup the transmit buffer. We are always using the first buffer for
649 	 * transmission, the second will be empty and only used to stop the DMA
650 	 * engine. We also flush the packet to RAM here to avoid cache trouble.
651 	 */
652 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
653 	swap_packet((uint32_t *)packet, length);
654 #endif
655 
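	/*
	 * Round the flush window out to whole cache lines: the start address
	 * is aligned down and the end rounded up, since cache maintenance
	 * always operates on complete lines.
	 */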
656 	addr = (uint32_t)packet;
657 	end = roundup(addr + length, ARCH_DMA_MINALIGN);
658 	addr &= ~(ARCH_DMA_MINALIGN - 1);
659 	flush_dcache_range(addr, end);
660 
661 	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
662 	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);
663 
664 	/*
665 	 * update BD's status now
666 	 * This block:
667 	 * - is always the last in a chain (means no chain)
668 	 * - should transmit the CRC
669 	 * - might be the last BD in the list, so the address counter should
670 	 *   wrap (-> keep the WRAP flag)
671 	 */
672 	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
673 	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
674 	writew(status, &fec->tbd_base[fec->tbd_index].status);
675 
676 	/*
677 	 * Flush data cache. This code flushes both TX descriptors to RAM.
678 	 * After this code, the descriptors will be safely in RAM and we
679 	 * can start DMA.
680 	 */
681 	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
682 	addr = (uint32_t)fec->tbd_base;
683 	flush_dcache_range(addr, addr + size);
684 
685 	/*
686 	 * Below we read the DMA descriptor's last four bytes back from the
687 	 * DRAM. This is important in order to make sure that all WRITE
688 	 * operations on the bus that were triggered by previous cache FLUSH
689 	 * have completed.
690 	 *
691 	 * Otherwise, on MX28, it is possible to observe a corruption of the
692 	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
693 	 * for the bus structure of MX28. The scenario is as follows:
694 	 *
695 	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
696 	 *    to DRAM due to flush_dcache_range()
697 	 * 2) ARM core writes the FEC registers via AHB_ARB2
698 	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
699 	 *
700 	 * Note that 2) does sometimes finish before 1) due to reordering of
701 	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
702 	 * DMA descriptor is fully written into DRAM. This results in occasional
703 	 * corruption of the DMA descriptor.
704 	 */
705 	readl(addr + size - 4);
706 
707 	/*
708 	 * Enable SmartDMA transmit task
709 	 */
710 	fec_tx_task_enable(fec);
711 
712 	/*
713 	 * Wait until the frame is sent. First poll the TDAR bit, which the
714 	 * hardware clears once all active transmit descriptors have been
715 	 * fetched; this is a register access, so no cache maintenance is needed.
716 	 */
717 	while (--timeout) {
718 		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
719 			break;
720 	}
721 
722 	if (!timeout) {
723 		ret = -EINVAL;
724 		goto out;
725 	}
726 
727 	/*
728 	 * The TDAR bit is cleared when the descriptors are all out from TX,
729 	 * but on mx6solox we noticed that the READY bit is still not cleared
730 	 * right after TDAR.
731 	 * These are two distinct signals, and in IC simulation we found that
732 	 * TDAR always gets cleared before the READY bit of the last BD becomes
733 	 * cleared.
734 	 * The mx6solox uses a later version of the FEC IP, and this intrinsic
735 	 * behaviour of the TDAR bit appears to have changed in that newer
736 	 * version.
737 	 *
738 	 * Fix this by polling the READY bit of BD after the TDAR polling,
739 	 * which covers the mx6solox case and does not harm the other SoCs.
740 	 */
741 	timeout = FEC_XFER_TIMEOUT;
742 	while (--timeout) {
743 		invalidate_dcache_range(addr, addr + size);
744 		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
745 		    FEC_TBD_READY))
746 			break;
747 	}
748 
749 	if (!timeout)
750 		ret = -EINVAL;
751 
752 out:
753 	debug("fec_send: status 0x%x index %d ret %i\n",
754 			readw(&fec->tbd_base[fec->tbd_index].status),
755 			fec->tbd_index, ret);
756 	/* for next transmission use the other buffer */
757 	if (fec->tbd_index)
758 		fec->tbd_index = 0;
759 	else
760 		fec->tbd_index = 1;
761 
762 	return ret;
763 }
764 
765 /**
766  * Pull one frame from the controller
767  * @param[in] dev Our ethernet device to handle
768  * @return Length of packet read
769  */
770 static int fec_recv(struct eth_device *dev)
771 {
772 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
773 	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
774 	unsigned long ievent;
775 	int frame_length, len = 0;
776 	struct nbuf *frame;
777 	uint16_t bd_status;
778 	uint32_t addr, size, end;
779 	int i;
780 	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);
781 
782 	/*
783 	 * Check if any critical events have happened
784 	 */
785 	ievent = readl(&fec->eth->ievent);
786 	writel(ievent, &fec->eth->ievent);
787 	debug("fec_recv: ievent 0x%lx\n", ievent);
788 	if (ievent & FEC_IEVENT_BABR) {
789 		fec_halt(dev);
790 		fec_init(dev, fec->bd);
791 		printf("some error: 0x%08lx\n", ievent);
792 		return 0;
793 	}
794 	if (ievent & FEC_IEVENT_HBERR) {
795 		/* Heartbeat error */
796 		writel(0x00000001 | readl(&fec->eth->x_cntrl),
797 				&fec->eth->x_cntrl);
798 	}
799 	if (ievent & FEC_IEVENT_GRA) {
800 		/* Graceful stop complete */
801 		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
802 			fec_halt(dev);
803 			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
804 					&fec->eth->x_cntrl);
805 			fec_init(dev, fec->bd);
806 		}
807 	}
808 
809 	/*
810 	 * Read the buffer status. Before the status can be read, the data cache
811 	 * must be invalidated, because the data in RAM might have been changed
812 	 * by DMA. The descriptors are properly aligned to cachelines so there's
813 	 * no need to worry they'd overlap.
814 	 *
815 	 * WARNING: By invalidating the descriptor here, we also invalidate
816 	 * the descriptors surrounding this one. Therefore we can NOT change the
817 	 * contents of this descriptor nor the surrounding ones. The problem is
818 	 * that in order to mark the descriptor as processed, we need to change
819 	 * the descriptor. The solution is to mark the whole cache line when all
820 	 * descriptors in the cache line are processed.
821 	 */
822 	addr = (uint32_t)rbd;
823 	addr &= ~(ARCH_DMA_MINALIGN - 1);
824 	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
825 	invalidate_dcache_range(addr, addr + size);
826 
827 	bd_status = readw(&rbd->status);
828 	debug("fec_recv: status 0x%x\n", bd_status);
829 
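	/*
	 * A cleared EMPTY bit means the descriptor holds a received frame.
	 * Only complete (LAST), error-free frames longer than an Ethernet
	 * header are passed up; data_length includes the 4-byte FCS, hence
	 * the "- 4" below.
	 */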
830 	if (!(bd_status & FEC_RBD_EMPTY)) {
831 		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
832 			((readw(&rbd->data_length) - 4) > 14)) {
833 			/*
834 			 * Get buffer address and size
835 			 */
836 			frame = (struct nbuf *)readl(&rbd->data_pointer);
837 			frame_length = readw(&rbd->data_length) - 4;
838 			/*
839 			 * Invalidate data cache over the buffer
840 			 */
841 			addr = (uint32_t)frame;
842 			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
843 			addr &= ~(ARCH_DMA_MINALIGN - 1);
844 			invalidate_dcache_range(addr, end);
845 
846 			/*
847 			 *  Fill the buffer and pass it to upper layers
848 			 */
849 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
850 			swap_packet((uint32_t *)frame->data, frame_length);
851 #endif
852 			memcpy(buff, frame->data, frame_length);
853 			NetReceive(buff, frame_length);
854 			len = frame_length;
855 		} else {
856 			if (bd_status & FEC_RBD_ERR)
857 				printf("error frame: 0x%08lx 0x%08x\n",
858 						(ulong)rbd->data_pointer,
859 						bd_status);
860 		}
861 
862 		/*
863 		 * Free the current buffer, restart the engine and move forward
864 		 * to the next buffer. Here we check if the whole cacheline of
865 		 * descriptors was already processed and if so, we mark it free
866 		 * as whole.
867 		 */
868 		size = RXDESC_PER_CACHELINE - 1;
869 		if ((fec->rbd_index & size) == size) {
870 			i = fec->rbd_index - size;
871 			addr = (uint32_t)&fec->rbd_base[i];
872 			for (; i <= fec->rbd_index ; i++) {
873 				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
874 					      &fec->rbd_base[i]);
875 			}
876 			flush_dcache_range(addr,
877 				addr + ARCH_DMA_MINALIGN);
878 		}
879 
880 		fec_rx_task_enable(fec);
881 		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
882 	}
883 	debug("fec_recv: stop\n");
884 
885 	return len;
886 }
887 
888 static void fec_set_dev_name(char *dest, int dev_id)
889 {
890 	sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
891 }
892 
893 static int fec_alloc_descs(struct fec_priv *fec)
894 {
895 	unsigned int size;
896 	int i;
897 	uint8_t *data;
898 
899 	/* Allocate TX descriptors. */
900 	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
901 	fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
902 	if (!fec->tbd_base)
903 		goto err_tx;
904 
905 	/* Allocate RX descriptors. */
906 	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
907 	fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
908 	if (!fec->rbd_base)
909 		goto err_rx;
910 
911 	memset(fec->rbd_base, 0, size);
912 
913 	/* Allocate RX buffers. */
914 
915 	/* Maximum RX buffer size. */
916 	size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
917 	for (i = 0; i < FEC_RBD_NUM; i++) {
918 		data = memalign(FEC_DMA_RX_MINALIGN, size);
919 		if (!data) {
920 			printf("%s: error allocating rxbuf %d\n", __func__, i);
921 			goto err_ring;
922 		}
923 
924 		memset(data, 0, size);
925 
926 		fec->rbd_base[i].data_pointer = (uint32_t)data;
927 		fec->rbd_base[i].status = FEC_RBD_EMPTY;
928 		fec->rbd_base[i].data_length = 0;
929 		/* Flush the buffer to memory. */
930 		flush_dcache_range((uint32_t)data, (uint32_t)data + size);
931 	}
932 
933 	/* Mark the last RBD to close the ring. */
934 	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
935 
936 	fec->rbd_index = 0;
937 	fec->tbd_index = 0;
938 
939 	return 0;
940 
941 err_ring:
942 	for (; i >= 0; i--)
943 		free((void *)fec->rbd_base[i].data_pointer);
944 	free(fec->rbd_base);
945 err_rx:
946 	free(fec->tbd_base);
947 err_tx:
948 	return -ENOMEM;
949 }
950 
951 static void fec_free_descs(struct fec_priv *fec)
952 {
953 	int i;
954 
955 	for (i = 0; i < FEC_RBD_NUM; i++)
956 		free((void *)fec->rbd_base[i].data_pointer);
957 	free(fec->rbd_base);
958 	free(fec->tbd_base);
959 }
960 
961 #ifdef CONFIG_PHYLIB
962 int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
963 		struct mii_dev *bus, struct phy_device *phydev)
964 #else
965 static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
966 		struct mii_dev *bus, int phy_id)
967 #endif
968 {
969 	struct eth_device *edev;
970 	struct fec_priv *fec;
971 	unsigned char ethaddr[6];
972 	uint32_t start;
973 	int ret = 0;
974 
975 	/* create and fill edev struct */
976 	edev = (struct eth_device *)malloc(sizeof(struct eth_device));
977 	if (!edev) {
978 		puts("fec_mxc: not enough malloc memory for eth_device\n");
979 		ret = -ENOMEM;
980 		goto err1;
981 	}
982 
983 	fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
984 	if (!fec) {
985 		puts("fec_mxc: not enough malloc memory for fec_priv\n");
986 		ret = -ENOMEM;
987 		goto err2;
988 	}
989 
990 	memset(edev, 0, sizeof(*edev));
991 	memset(fec, 0, sizeof(*fec));
992 
993 	ret = fec_alloc_descs(fec);
994 	if (ret)
995 		goto err3;
996 
997 	edev->priv = fec;
998 	edev->init = fec_init;
999 	edev->send = fec_send;
1000 	edev->recv = fec_recv;
1001 	edev->halt = fec_halt;
1002 	edev->write_hwaddr = fec_set_hwaddr;
1003 
1004 	fec->eth = (struct ethernet_regs *)base_addr;
1005 	fec->bd = bd;
1006 
1007 	fec->xcv_type = CONFIG_FEC_XCV_TYPE;
1008 
1009 	/* Reset chip. */
1010 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
1011 	start = get_timer(0);
1012 	while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
1013 		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
1014 			printf("FEC MXC: Timeout resetting chip\n");
			ret = -ETIMEDOUT;
1015 			goto err4;
1016 		}
1017 		udelay(10);
1018 	}
1019 
1020 	fec_reg_setup(fec);
1021 	fec_set_dev_name(edev->name, dev_id);
1022 	fec->dev_id = (dev_id == -1) ? 0 : dev_id;
1023 	fec->bus = bus;
1024 	fec_mii_setspeed(bus->priv);
1025 #ifdef CONFIG_PHYLIB
1026 	fec->phydev = phydev;
1027 	phy_connect_dev(phydev, edev);
1028 	/* Configure phy */
1029 	phy_config(phydev);
1030 #else
1031 	fec->phy_id = phy_id;
1032 #endif
1033 	eth_register(edev);
1034 
1035 	if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
1036 		debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
1037 		memcpy(edev->enetaddr, ethaddr, 6);
1038 		if (!getenv("ethaddr"))
1039 			eth_setenv_enetaddr("ethaddr", ethaddr);
1040 	}
1041 	return ret;
1042 err4:
1043 	fec_free_descs(fec);
1044 err3:
1045 	free(fec);
1046 err2:
1047 	free(edev);
1048 err1:
1049 	return ret;
1050 }
1051 
1052 struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
1053 {
1054 	struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
1055 	struct mii_dev *bus;
1056 	int ret;
1057 
1058 	bus = mdio_alloc();
1059 	if (!bus) {
1060 		printf("mdio_alloc failed\n");
1061 		return NULL;
1062 	}
1063 	bus->read = fec_phy_read;
1064 	bus->write = fec_phy_write;
1065 	bus->priv = eth;
1066 	fec_set_dev_name(bus->name, dev_id);
1067 
1068 	ret = mdio_register(bus);
1069 	if (ret) {
1070 		printf("mdio_register failed\n");
1071 		free(bus);
1072 		return NULL;
1073 	}
1074 	fec_mii_setspeed(eth);
1075 	return bus;
1076 }
1077 
1078 int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
1079 {
1080 	uint32_t base_mii;
1081 	struct mii_dev *bus = NULL;
1082 #ifdef CONFIG_PHYLIB
1083 	struct phy_device *phydev = NULL;
1084 #endif
1085 	int ret;
1086 
1087 #ifdef CONFIG_MX28
1088 	/*
1089 	 * The i.MX28 has two ethernet interfaces, but they are not equal.
1090 	 * Only the first one can access the MDIO bus.
1091 	 */
1092 	base_mii = MXS_ENET0_BASE;
1093 #else
1094 	base_mii = addr;
1095 #endif
1096 	debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
1097 	bus = fec_get_miibus(base_mii, dev_id);
1098 	if (!bus)
1099 		return -ENOMEM;
1100 #ifdef CONFIG_PHYLIB
1101 	phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
1102 	if (!phydev) {
1103 		free(bus);
1104 		return -ENOMEM;
1105 	}
1106 	ret = fec_probe(bd, dev_id, addr, bus, phydev);
1107 #else
1108 	ret = fec_probe(bd, dev_id, addr, bus, phy_id);
1109 #endif
1110 	if (ret) {
1111 #ifdef CONFIG_PHYLIB
1112 		free(phydev);
1113 #endif
1114 		free(bus);
1115 	}
1116 	return ret;
1117 }
1118 
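/*
 * Typical board-level usage (an illustrative sketch; the dev_id and PHY
 * address values below are assumptions, not requirements of this driver):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return fecmxc_initialize_multi(bis, 0, CONFIG_FEC_MXC_PHYADDR,
 *					       IMX_FEC_BASE);
 *	}
 *
 * Boards that define CONFIG_FEC_MXC_PHYADDR can instead call the
 * fecmxc_initialize() convenience wrapper below.
 */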
1119 #ifdef CONFIG_FEC_MXC_PHYADDR
1120 int fecmxc_initialize(bd_t *bd)
1121 {
1122 	return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
1123 			IMX_FEC_BASE);
1124 }
1125 #endif
1126 
1127 #ifndef CONFIG_PHYLIB
1128 int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
1129 {
1130 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
1131 	fec->mii_postcall = cb;
1132 	return 0;
1133 }
1134 #endif
1135