xref: /openbmc/u-boot/drivers/qe/uec.c (revision 0cf4fd3c)
1 /*
2  * Copyright (C) 2006 Freescale Semiconductor, Inc.
3  *
4  * Dave Liu <daveliu@freescale.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation; either version 2 of
9  * the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
19  * MA 02111-1307 USA
20  */
21 
22 #include "common.h"
23 #include "net.h"
24 #include "malloc.h"
25 #include "asm/errno.h"
26 #include "asm/io.h"
27 #include "asm/immap_qe.h"
28 #include "qe.h"
29 #include "uccf.h"
30 #include "uec.h"
31 #include "uec_phy.h"
32 #include "miiphy.h"
33 
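/*
 * Per-controller configuration tables; the CFG_UECx_* values come from the
 * board configuration header.  Fast (10/100) controllers use a single Rx/Tx
 * thread each, gigabit controllers use four.
 */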
34 #ifdef CONFIG_UEC_ETH1
35 static uec_info_t eth1_uec_info = {
36 	.uf_info		= {
37 		.ucc_num	= CFG_UEC1_UCC_NUM,
38 		.rx_clock	= CFG_UEC1_RX_CLK,
39 		.tx_clock	= CFG_UEC1_TX_CLK,
40 		.eth_type	= CFG_UEC1_ETH_TYPE,
41 	},
42 #if (CFG_UEC1_ETH_TYPE == FAST_ETH)
43 	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
44 	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
45 #else
46 	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
47 	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
48 #endif
49 	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
50 	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
51 	.tx_bd_ring_len		= 16,
52 	.rx_bd_ring_len		= 16,
53 	.phy_address		= CFG_UEC1_PHY_ADDR,
54 	.enet_interface		= CFG_UEC1_INTERFACE_MODE,
55 };
56 #endif
57 #ifdef CONFIG_UEC_ETH2
58 static uec_info_t eth2_uec_info = {
59 	.uf_info		= {
60 		.ucc_num	= CFG_UEC2_UCC_NUM,
61 		.rx_clock	= CFG_UEC2_RX_CLK,
62 		.tx_clock	= CFG_UEC2_TX_CLK,
63 		.eth_type	= CFG_UEC2_ETH_TYPE,
64 	},
65 #if (CFG_UEC2_ETH_TYPE == FAST_ETH)
66 	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
67 	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
68 #else
69 	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
70 	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
71 #endif
72 	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
73 	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
74 	.tx_bd_ring_len		= 16,
75 	.rx_bd_ring_len		= 16,
76 	.phy_address		= CFG_UEC2_PHY_ADDR,
77 	.enet_interface		= CFG_UEC2_INTERFACE_MODE,
78 };
79 #endif
80 #ifdef CONFIG_UEC_ETH3
81 static uec_info_t eth3_uec_info = {
82 	.uf_info		= {
83 		.ucc_num	= CFG_UEC3_UCC_NUM,
84 		.rx_clock	= CFG_UEC3_RX_CLK,
85 		.tx_clock	= CFG_UEC3_TX_CLK,
86 		.eth_type	= CFG_UEC3_ETH_TYPE,
87 	},
88 #if (CFG_UEC3_ETH_TYPE == FAST_ETH)
89 	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
90 	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
91 #else
92 	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
93 	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
94 #endif
95 	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
96 	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
97 	.tx_bd_ring_len		= 16,
98 	.rx_bd_ring_len		= 16,
99 	.phy_address		= CFG_UEC3_PHY_ADDR,
100 	.enet_interface		= CFG_UEC3_INTERFACE_MODE,
101 };
102 #endif
103 #ifdef CONFIG_UEC_ETH4
104 static uec_info_t eth4_uec_info = {
105 	.uf_info		= {
106 		.ucc_num	= CFG_UEC4_UCC_NUM,
107 		.rx_clock	= CFG_UEC4_RX_CLK,
108 		.tx_clock	= CFG_UEC4_TX_CLK,
109 		.eth_type	= CFG_UEC4_ETH_TYPE,
110 	},
111 #if (CFG_UEC4_ETH_TYPE == FAST_ETH)
112 	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
113 	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
114 #else
115 	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
116 	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
117 #endif
118 	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
119 	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
120 	.tx_bd_ring_len		= 16,
121 	.rx_bd_ring_len		= 16,
122 	.phy_address		= CFG_UEC4_PHY_ADDR,
123 	.enet_interface		= CFG_UEC4_INTERFACE_MODE,
124 };
125 #endif
126 
127 #define MAXCONTROLLERS	(4)
128 
129 static struct eth_device *devlist[MAXCONTROLLERS];
130 
131 u16 phy_read (struct uec_mii_info *mii_info, u16 regnum);
132 void phy_write (struct uec_mii_info *mii_info, u16 regnum, u16 val);
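/* phy_read()/phy_write() are generic PHY register accessors (assumed to be
 * provided by uec_phy.c) that go through the mdio_read/mdio_write hooks set
 * up in init_phy() below. */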
133 
134 static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
135 {
136 	uec_t		*uec_regs;
137 	u32		maccfg1;
138 
139 	if (!uec) {
140 		printf("%s: uec not initialized\n", __FUNCTION__);
141 		return -EINVAL;
142 	}
143 	uec_regs = uec->uec_regs;
144 
145 	maccfg1 = in_be32(&uec_regs->maccfg1);
146 
147 	if (mode & COMM_DIR_TX)	{
148 		maccfg1 |= MACCFG1_ENABLE_TX;
149 		out_be32(&uec_regs->maccfg1, maccfg1);
150 		uec->mac_tx_enabled = 1;
151 	}
152 
153 	if (mode & COMM_DIR_RX)	{
154 		maccfg1 |= MACCFG1_ENABLE_RX;
155 		out_be32(&uec_regs->maccfg1, maccfg1);
156 		uec->mac_rx_enabled = 1;
157 	}
158 
159 	return 0;
160 }
161 
162 static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
163 {
164 	uec_t		*uec_regs;
165 	u32		maccfg1;
166 
167 	if (!uec) {
168 		printf("%s: uec not initialized\n", __FUNCTION__);
169 		return -EINVAL;
170 	}
171 	uec_regs = uec->uec_regs;
172 
173 	maccfg1 = in_be32(&uec_regs->maccfg1);
174 
175 	if (mode & COMM_DIR_TX)	{
176 		maccfg1 &= ~MACCFG1_ENABLE_TX;
177 		out_be32(&uec_regs->maccfg1, maccfg1);
178 		uec->mac_tx_enabled = 0;
179 	}
180 
181 	if (mode & COMM_DIR_RX)	{
182 		maccfg1 &= ~MACCFG1_ENABLE_RX;
183 		out_be32(&uec_regs->maccfg1, maccfg1);
184 		uec->mac_rx_enabled = 0;
185 	}
186 
187 	return 0;
188 }
189 
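/*
 * Graceful stop / restart helpers.  Stopping issues a QE host command to the
 * UCC's CECR sub-block and then polls for completion: the Tx stop is
 * signalled by the GRA event bit in UCCE, the Rx stop by the acknowledge bit
 * in the Rx global parameter RAM (rxgstpack).  The restart commands tell the
 * microcode to resume transmission/reception.
 */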
190 static int uec_graceful_stop_tx(uec_private_t *uec)
191 {
192 	ucc_fast_t		*uf_regs;
193 	u32			cecr_subblock;
194 	u32			ucce;
195 
196 	if (!uec || !uec->uccf) {
197 		printf("%s: No handle passed.\n", __FUNCTION__);
198 		return -EINVAL;
199 	}
200 
201 	uf_regs = uec->uccf->uf_regs;
202 
203 	/* Clear the grace stop event */
204 	out_be32(&uf_regs->ucce, UCCE_GRA);
205 
206 	/* Issue host command */
207 	cecr_subblock =
208 		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
209 	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
210 			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
211 
212 	/* Wait for command to complete */
213 	do {
214 		ucce = in_be32(&uf_regs->ucce);
215 	} while (! (ucce & UCCE_GRA));
216 
217 	uec->grace_stopped_tx = 1;
218 
219 	return 0;
220 }
221 
222 static int uec_graceful_stop_rx(uec_private_t *uec)
223 {
224 	u32		cecr_subblock;
225 	u8		ack;
226 
227 	if (!uec) {
228 		printf("%s: No handle passed.\n", __FUNCTION__);
229 		return -EINVAL;
230 	}
231 
232 	if (!uec->p_rx_glbl_pram) {
233 		printf("%s: Rx global parameter RAM not initialized\n", __FUNCTION__);
234 		return -EINVAL;
235 	}
236 
237 	/* Clear acknowledge bit */
238 	ack = uec->p_rx_glbl_pram->rxgstpack;
239 	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
240 	uec->p_rx_glbl_pram->rxgstpack = ack;
241 
242 	/* Keep issuing cmd and checking ack bit until it is asserted */
243 	do {
244 		/* Issue host command */
245 		cecr_subblock =
246 		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
247 		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
248 				 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
249 		ack = uec->p_rx_glbl_pram->rxgstpack;
250 	} while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));
251 
252 	uec->grace_stopped_rx = 1;
253 
254 	return 0;
255 }
256 
257 static int uec_restart_tx(uec_private_t *uec)
258 {
259 	u32		cecr_subblock;
260 
261 	if (!uec || !uec->uec_info) {
262 		printf("%s: No handle passed.\n", __FUNCTION__);
263 		return -EINVAL;
264 	}
265 
266 	cecr_subblock =
267 	 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
268 	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
269 			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
270 
271 	uec->grace_stopped_tx = 0;
272 
273 	return 0;
274 }
275 
276 static int uec_restart_rx(uec_private_t *uec)
277 {
278 	u32		cecr_subblock;
279 
280 	if (!uec || !uec->uec_info) {
281 		printf("%s: No handle passed.\n", __FUNCTION__);
282 		return -EINVAL;
283 	}
284 
285 	cecr_subblock =
286 	 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
287 	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
288 			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
289 
290 	uec->grace_stopped_rx = 0;
291 
292 	return 0;
293 }
294 
295 static int uec_open(uec_private_t *uec, comm_dir_e mode)
296 {
297 	ucc_fast_private_t	*uccf;
298 
299 	if (!uec || !uec->uccf) {
300 		printf("%s: No handle passed.\n", __FUNCTION__);
301 		return -EINVAL;
302 	}
303 	uccf = uec->uccf;
304 
305 	/* check if the UCC number is in range. */
306 	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
307 		printf("%s: ucc_num out of range.\n", __FUNCTION__);
308 		return -EINVAL;
309 	}
310 
311 	/* Enable MAC */
312 	uec_mac_enable(uec, mode);
313 
314 	/* Enable UCC fast */
315 	ucc_fast_enable(uccf, mode);
316 
317 	/* RISC microcode start */
318 	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
319 		uec_restart_tx(uec);
320 	}
321 	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
322 		uec_restart_rx(uec);
323 	}
324 
325 	return 0;
326 }
327 
328 static int uec_stop(uec_private_t *uec, comm_dir_e mode)
329 {
330 	ucc_fast_private_t	*uccf;
331 
332 	if (!uec || !uec->uccf) {
333 		printf("%s: No handle passed.\n", __FUNCTION__);
334 		return -EINVAL;
335 	}
336 	uccf = uec->uccf;
337 
338 	/* check if the UCC number is in range. */
339 	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
340 		printf("%s: ucc_num out of range.\n", __FUNCTION__);
341 		return -EINVAL;
342 	}
343 	/* Stop any transmissions */
344 	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
345 		uec_graceful_stop_tx(uec);
346 	}
347 	/* Stop any receptions */
348 	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
349 		uec_graceful_stop_rx(uec);
350 	}
351 
352 	/* Disable the UCC fast */
353 	ucc_fast_disable(uec->uccf, mode);
354 
355 	/* Disable the MAC */
356 	uec_mac_disable(uec, mode);
357 
358 	return 0;
359 }
360 
361 static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
362 {
363 	uec_t		*uec_regs;
364 	u32		maccfg2;
365 
366 	if (!uec) {
367 		printf("%s: uec not initialized\n", __FUNCTION__);
368 		return -EINVAL;
369 	}
370 	uec_regs = uec->uec_regs;
371 
372 	if (duplex == DUPLEX_HALF) {
373 		maccfg2 = in_be32(&uec_regs->maccfg2);
374 		maccfg2 &= ~MACCFG2_FDX;
375 		out_be32(&uec_regs->maccfg2, maccfg2);
376 	}
377 
378 	if (duplex == DUPLEX_FULL) {
379 		maccfg2 = in_be32(&uec_regs->maccfg2);
380 		maccfg2 |= MACCFG2_FDX;
381 		out_be32(&uec_regs->maccfg2, maccfg2);
382 	}
383 
384 	return 0;
385 }
386 
387 static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode)
388 {
389 	enet_interface_e	enet_if_mode;
390 	uec_info_t		*uec_info;
391 	uec_t			*uec_regs;
392 	u32			upsmr;
393 	u32			maccfg2;
394 
395 	if (!uec) {
396 		printf("%s: uec not initialized\n", __FUNCTION__);
397 		return -EINVAL;
398 	}
399 
400 	uec_info = uec->uec_info;
401 	uec_regs = uec->uec_regs;
402 	enet_if_mode = if_mode;
403 
404 	maccfg2 = in_be32(&uec_regs->maccfg2);
405 	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
406 
407 	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
408 	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);
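	/*
	 * UPSMR interface bits used below: RPM selects reduced-pin mode
	 * (RGMII/RTBI), TBIM selects TBI, R10M selects the 10 Mbps rate for
	 * the reduced pin modes (RGMII/RMII) and RMM selects RMII.
	 */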
409 
410 	switch (enet_if_mode) {
411 		case ENET_100_MII:
412 		case ENET_10_MII:
413 			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
414 			break;
415 		case ENET_1000_GMII:
416 			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
417 			break;
418 		case ENET_1000_TBI:
419 			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
420 			upsmr |= UPSMR_TBIM;
421 			break;
422 		case ENET_1000_RTBI:
423 			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
424 			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
425 			break;
426 		case ENET_1000_RGMII_RXID:
427 		case ENET_1000_RGMII:
428 			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
429 			upsmr |= UPSMR_RPM;
430 			break;
431 		case ENET_100_RGMII:
432 			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
433 			upsmr |= UPSMR_RPM;
434 			break;
435 		case ENET_10_RGMII:
436 			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
437 			upsmr |= (UPSMR_RPM | UPSMR_R10M);
438 			break;
439 		case ENET_100_RMII:
440 			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
441 			upsmr |= UPSMR_RMM;
442 			break;
443 		case ENET_10_RMII:
444 			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
445 			upsmr |= (UPSMR_R10M | UPSMR_RMM);
446 			break;
447 		default:
448 			return -EINVAL;
449 			break;
450 	}
451 	out_be32(&uec_regs->maccfg2, maccfg2);
452 	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);
453 
454 	return 0;
455 }
456 
457 static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
458 {
459 	int		timeout = 0x1000;
460 	u32		miimcfg = 0;
461 
462 	miimcfg = in_be32(&uec_mii_regs->miimcfg);
463 	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
464 	out_be32(&uec_mii_regs->miimcfg, miimcfg);
465 
466 	/* Wait until the bus is free */
467 	while ((in_be32(&uec_mii_regs->miimind) & MIIMIND_BUSY) && timeout--);
468 	if (timeout <= 0) {
469 		printf("%s: The MII Bus is stuck!\n", __FUNCTION__);
470 		return -ETIMEDOUT;
471 	}
472 
473 	return 0;
474 }
475 
476 static int init_phy(struct eth_device *dev)
477 {
478 	uec_private_t		*uec;
479 	uec_mii_t		*umii_regs;
480 	struct uec_mii_info	*mii_info;
481 	struct phy_info		*curphy;
482 	int			err;
483 
484 	uec = (uec_private_t *)dev->priv;
485 	umii_regs = uec->uec_mii_regs;
486 
487 	uec->oldlink = 0;
488 	uec->oldspeed = 0;
489 	uec->oldduplex = -1;
490 
491 	mii_info = malloc(sizeof(*mii_info));
492 	if (!mii_info) {
493 		printf("%s: Could not allocate mii_info\n", dev->name);
494 		return -ENOMEM;
495 	}
496 	memset(mii_info, 0, sizeof(*mii_info));
497 
498 	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
499 		mii_info->speed = SPEED_1000;
500 	} else {
501 		mii_info->speed = SPEED_100;
502 	}
503 
504 	mii_info->duplex = DUPLEX_FULL;
505 	mii_info->pause = 0;
506 	mii_info->link = 1;
507 
508 	mii_info->advertising = (ADVERTISED_10baseT_Half |
509 				ADVERTISED_10baseT_Full |
510 				ADVERTISED_100baseT_Half |
511 				ADVERTISED_100baseT_Full |
512 				ADVERTISED_1000baseT_Full);
513 	mii_info->autoneg = 1;
514 	mii_info->mii_id = uec->uec_info->phy_address;
515 	mii_info->dev = dev;
516 
517 	mii_info->mdio_read = &uec_read_phy_reg;
518 	mii_info->mdio_write = &uec_write_phy_reg;
519 
520 	uec->mii_info = mii_info;
521 
522 	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);
523 
524 	if (init_mii_management_configuration(umii_regs)) {
525 		printf("%s: The MII Bus is stuck!\n", dev->name);
526 		err = -1;
527 		goto bus_fail;
528 	}
529 
530 	/* get info for this PHY */
531 	curphy = uec_get_phy_info(uec->mii_info);
532 	if (!curphy) {
533 		printf("%s: No PHY found\n", dev->name);
534 		err = -1;
535 		goto no_phy;
536 	}
537 
538 	mii_info->phyinfo = curphy;
539 
540 	/* Run the commands which initialize the PHY */
541 	if (curphy->init) {
542 		err = curphy->init(uec->mii_info);
543 		if (err)
544 			goto phy_init_fail;
545 	}
546 
547 	return 0;
548 
549 phy_init_fail:
550 no_phy:
551 bus_fail:
552 	free(mii_info);
553 	return err;
554 }
555 
556 static void adjust_link(struct eth_device *dev)
557 {
558 	uec_private_t		*uec = (uec_private_t *)dev->priv;
559 	uec_t			*uec_regs;
560 	struct uec_mii_info	*mii_info = uec->mii_info;
561 
562 	extern void change_phy_interface_mode(struct eth_device *dev,
563 					 enet_interface_e mode);
564 	uec_regs = uec->uec_regs;
565 
566 	if (mii_info->link) {
567 		/* Now we make sure that we can be in full duplex mode.
568 		* If not, we operate in half-duplex mode. */
569 		if (mii_info->duplex != uec->oldduplex) {
570 			if (!(mii_info->duplex)) {
571 				uec_set_mac_duplex(uec, DUPLEX_HALF);
572 				printf("%s: Half Duplex\n", dev->name);
573 			} else {
574 				uec_set_mac_duplex(uec, DUPLEX_FULL);
575 				printf("%s: Full Duplex\n", dev->name);
576 			}
577 			uec->oldduplex = mii_info->duplex;
578 		}
579 
580 		if (mii_info->speed != uec->oldspeed) {
581 			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
582 				switch (mii_info->speed) {
583 				case 1000:
584 					break;
585 				case 100:
586 					printf ("switching to rgmii 100\n");
587 					/* change phy to rgmii 100 */
588 					change_phy_interface_mode(dev,
589 								ENET_100_RGMII);
590 					/* change the MAC interface mode */
591 					uec_set_mac_if_mode(uec,ENET_100_RGMII);
592 					break;
593 				case 10:
594 					printf ("switching to rgmii 10\n");
595 					/* change phy to rgmii 10 */
596 					change_phy_interface_mode(dev,
597 								ENET_10_RGMII);
598 					/* change the MAC interface mode */
599 					uec_set_mac_if_mode(uec,ENET_10_RGMII);
600 					break;
601 				default:
602 					printf("%s: Ack, speed (%d) is illegal\n",
603 						dev->name, mii_info->speed);
604 					break;
605 				}
606 			}
607 
608 			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
609 			uec->oldspeed = mii_info->speed;
610 		}
611 
612 		if (!uec->oldlink) {
613 			printf("%s: Link is up\n", dev->name);
614 			uec->oldlink = 1;
615 		}
616 
617 	} else { /* if (mii_info->link) */
618 		if (uec->oldlink) {
619 			printf("%s: Link is down\n", dev->name);
620 			uec->oldlink = 0;
621 			uec->oldspeed = 0;
622 			uec->oldduplex = -1;
623 		}
624 	}
625 }
626 
627 static void phy_change(struct eth_device *dev)
628 {
629 	uec_private_t	*uec = (uec_private_t *)dev->priv;
630 
631 	/* Update the link, speed, duplex */
632 	uec->mii_info->phyinfo->read_status(uec->mii_info);
633 
634 	/* Adjust the interface according to speed */
635 	adjust_link(dev);
636 }
637 
638 #if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
639 	&& !defined(BITBANGMII)
640 
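/* Note: these miiphy callbacks ignore 'devname' and always operate on the
 * first registered UEC (devlist[0]). */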
641 /*
642  * Read a MII PHY register.
643  *
644  * Returns:
645  *  0 on success
646  */
647 static int uec_miiphy_read(char *devname, unsigned char addr,
648 			    unsigned char reg, unsigned short *value)
649 {
650 	*value = uec_read_phy_reg(devlist[0], addr, reg);
651 
652 	return 0;
653 }
654 
655 /*
656  * Write a MII PHY register.
657  *
658  * Returns:
659  *  0 on success
660  */
661 static int uec_miiphy_write(char *devname, unsigned char addr,
662 			     unsigned char reg, unsigned short value)
663 {
664 	uec_write_phy_reg(devlist[0], addr, reg, value);
665 
666 	return 0;
667 }
668 
669 #endif
670 
671 static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
672 {
673 	uec_t		*uec_regs;
674 	u32		mac_addr1;
675 	u32		mac_addr2;
676 
677 	if (!uec) {
678 		printf("%s: uec not initialized\n", __FUNCTION__);
679 		return -EINVAL;
680 	}
681 
682 	uec_regs = uec->uec_regs;
683 
684 	/* For a station address of 0x12345678ABCD, write
685 	   0xCDAB7856 to MACSTNADDR1 and
686 	   0x34120000 to MACSTNADDR2 */
687 
688 	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
689 			(mac_addr[3] << 8)  | (mac_addr[2]);
690 	out_be32(&uec_regs->macstnaddr1, mac_addr1);
691 
692 	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
693 	out_be32(&uec_regs->macstnaddr2, mac_addr2);
694 
695 	return 0;
696 }
697 
698 static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
699 					 int *threads_num_ret)
700 {
701 	int	num_threads_numerica;
702 
703 	switch (threads_num) {
704 		case UEC_NUM_OF_THREADS_1:
705 			num_threads_numerica = 1;
706 			break;
707 		case UEC_NUM_OF_THREADS_2:
708 			num_threads_numerica = 2;
709 			break;
710 		case UEC_NUM_OF_THREADS_4:
711 			num_threads_numerica = 4;
712 			break;
713 		case UEC_NUM_OF_THREADS_6:
714 			num_threads_numerica = 6;
715 			break;
716 		case UEC_NUM_OF_THREADS_8:
717 			num_threads_numerica = 8;
718 			break;
719 		default:
720 			printf("%s: Bad number of threads value.\n",
721 				 __FUNCTION__);
722 			return -EINVAL;
723 	}
724 
725 	*threads_num_ret = num_threads_numerica;
726 
727 	return 0;
728 }
729 
730 static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
731 {
732 	uec_info_t	*uec_info;
733 	u32		end_bd;
734 	u8		bmrx = 0;
735 	int		i;
736 
737 	uec_info = uec->uec_info;
738 
739 	/* Alloc global Tx parameter RAM page */
740 	uec->tx_glbl_pram_offset = qe_muram_alloc(
741 				sizeof(uec_tx_global_pram_t),
742 				 UEC_TX_GLOBAL_PRAM_ALIGNMENT);
743 	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
744 				qe_muram_addr(uec->tx_glbl_pram_offset);
745 
746 	/* Zero the global Tx parameter RAM */
747 	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));
748 
749 	/* Init global Tx parameter RAM */
750 
751 	/* TEMODER, RMON statistics disable, one Tx queue */
752 	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);
753 
754 	/* SQPTR */
755 	uec->send_q_mem_reg_offset = qe_muram_alloc(
756 				sizeof(uec_send_queue_qd_t),
757 				 UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
758 	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
759 				qe_muram_addr(uec->send_q_mem_reg_offset);
760 	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);
761 
762 	/* Setup the table with TxBDs ring */
763 	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
764 					 * SIZEOFBD;
765 	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
766 				 (u32)(uec->p_tx_bd_ring));
767 	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
768 						 end_bd);
769 
770 	/* Scheduler Base Pointer, we have only one Tx queue, no need it */
771 	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);
772 
773 	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
774 	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);
775 
776 	/* TSTATE, global snooping, big endian, the CSB bus selected */
777 	bmrx = BMR_INIT_VALUE;
778 	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));
779 
780 	/* IPH_Offset */
781 	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
782 		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
783 	}
784 
785 	/* VTAG table */
786 	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
787 		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
788 	}
789 
790 	/* TQPTR */
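	/* Thread data area for the Tx threads; when only a single Tx thread is
	 * used an extra 32 bytes are reserved (the '32 * (num_threads_tx == 1)'
	 * term below). */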
791 	uec->thread_dat_tx_offset = qe_muram_alloc(
792 		num_threads_tx * sizeof(uec_thread_data_tx_t) +
793 		 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);
794 
795 	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
796 				qe_muram_addr(uec->thread_dat_tx_offset);
797 	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
798 }
799 
800 static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
801 {
802 	u8	bmrx = 0;
803 	int	i;
804 	uec_82xx_address_filtering_pram_t	*p_af_pram;
805 
806 	/* Allocate global Rx parameter RAM page */
807 	uec->rx_glbl_pram_offset = qe_muram_alloc(
808 		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
809 	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
810 				qe_muram_addr(uec->rx_glbl_pram_offset);
811 
812 	/* Zero Global Rx parameter RAM */
813 	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));
814 
815 	/* Init global Rx parameter RAM */
816 	/* REMODER: extended feature mode disabled, VLAN disabled,
817 	 * lossless flow control disabled, receive firmware statistics
818 	 * disabled, extended address parsing mode disabled, one Rx queue,
819 	 * dynamic maximum/minimum frame length disabled, IP checksum check
820 	 * disabled, IP address alignment disabled
821 	 */
822 	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);
823 
824 	/* RQPTR */
825 	uec->thread_dat_rx_offset = qe_muram_alloc(
826 			num_threads_rx * sizeof(uec_thread_data_rx_t),
827 			 UEC_THREAD_DATA_ALIGNMENT);
828 	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
829 				qe_muram_addr(uec->thread_dat_rx_offset);
830 	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);
831 
832 	/* Type_or_Len */
833 	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);
834 
835 	/* RxRMON base pointer, we don't need it */
836 	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);
837 
838 	/* IntCoalescingPTR, we don't need it, no interrupt */
839 	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);
840 
841 	/* RSTATE, global snooping, big endian, the CSB bus selected */
842 	bmrx = BMR_INIT_VALUE;
843 	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);
844 
845 	/* MRBLR */
846 	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);
847 
848 	/* RBDQPTR */
849 	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
850 				sizeof(uec_rx_bd_queues_entry_t) + \
851 				sizeof(uec_rx_prefetched_bds_t),
852 				 UEC_RX_BD_QUEUES_ALIGNMENT);
853 	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
854 				qe_muram_addr(uec->rx_bd_qs_tbl_offset);
855 
856 	/* Zero it */
857 	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
858 					sizeof(uec_rx_prefetched_bds_t));
859 	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
860 	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
861 		 (u32)uec->p_rx_bd_ring);
862 
863 	/* MFLR */
864 	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
865 	/* MINFLR */
866 	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
867 	/* MAXD1 */
868 	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
869 	/* MAXD2 */
870 	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
871 	/* ECAM_PTR */
872 	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
873 	/* L2QT */
874 	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
875 	/* L3QT */
876 	for (i = 0; i < 8; i++)	{
877 		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
878 	}
879 
880 	/* VLAN_TYPE */
881 	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
882 	/* TCI */
883 	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);
884 
885 	/* Clear PQ2 style address filtering hash table */
886 	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
887 			uec->p_rx_glbl_pram->addressfiltering;
888 
889 	p_af_pram->iaddr_h = 0;
890 	p_af_pram->iaddr_l = 0;
891 	p_af_pram->gaddr_h = 0;
892 	p_af_pram->gaddr_l = 0;
893 }
894 
895 static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
896 					 int thread_tx, int thread_rx)
897 {
898 	uec_init_cmd_pram_t		*p_init_enet_param;
899 	u32				init_enet_param_offset;
900 	uec_info_t			*uec_info;
901 	int				i;
902 	int				snum;
903 	u32				init_enet_offset;
904 	u32				entry_val;
905 	u32				command;
906 	u32				cecr_subblock;
907 
908 	uec_info = uec->uec_info;
909 
910 	/* Allocate init enet command parameter */
911 	uec->init_enet_param_offset = qe_muram_alloc(
912 					sizeof(uec_init_cmd_pram_t), 4);
913 	init_enet_param_offset = uec->init_enet_param_offset;
914 	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
915 				qe_muram_addr(uec->init_enet_param_offset);
916 
917 	/* Zero init enet command struct */
918 	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));
919 
920 	/* Init the command struct */
921 	p_init_enet_param = uec->p_init_enet_param;
922 	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
923 	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
924 	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
925 	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
926 	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
927 	p_init_enet_param->largestexternallookupkeysize = 0;
928 
929 	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
930 					 << ENET_INIT_PARAM_RGF_SHIFT;
931 	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
932 					 << ENET_INIT_PARAM_TGF_SHIFT;
933 
934 	/* Init Rx global parameter pointer */
935 	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
936 						 (u32)uec_info->riscRx;
937 
938 	/* Init Rx threads */
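	/* Each entry pairs a QE serial number (snum) with a thread parameter
	 * RAM offset and the RISC allocation; entry 0 keeps offset 0, the
	 * remaining entries get their own thread parameter RAM pages. */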
939 	for (i = 0; i < (thread_rx + 1); i++) {
940 		if ((snum = qe_get_snum()) < 0) {
941 			printf("%s: cannot get snum\n", __FUNCTION__);
942 			return -ENOMEM;
943 		}
944 
945 		if (i==0) {
946 			init_enet_offset = 0;
947 		} else {
948 			init_enet_offset = qe_muram_alloc(
949 					sizeof(uec_thread_rx_pram_t),
950 					 UEC_THREAD_RX_PRAM_ALIGNMENT);
951 		}
952 
953 		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
954 				 init_enet_offset | (u32)uec_info->riscRx;
955 		p_init_enet_param->rxthread[i] = entry_val;
956 	}
957 
958 	/* Init Tx global parameter pointer */
959 	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
960 					 (u32)uec_info->riscTx;
961 
962 	/* Init Tx threads */
963 	for (i = 0; i < thread_tx; i++) {
964 		if ((snum = qe_get_snum()) < 0)	{
965 			printf("%s: cannot get snum\n", __FUNCTION__);
966 			return -ENOMEM;
967 		}
968 
969 		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
970 						 UEC_THREAD_TX_PRAM_ALIGNMENT);
971 
972 		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
973 				 init_enet_offset | (u32)uec_info->riscTx;
974 		p_init_enet_param->txthread[i] = entry_val;
975 	}
976 
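	/* Make sure all parameter RAM writes have reached memory before the
	 * QE command below is issued. */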
977 	__asm__ __volatile__("sync");
978 
979 	/* Issue QE command */
980 	command = QE_INIT_TX_RX;
981 	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
982 				uec->uec_info->uf_info.ucc_num);
983 	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
984 						 init_enet_param_offset);
985 
986 	return 0;
987 }
988 
989 static int uec_startup(uec_private_t *uec)
990 {
991 	uec_info_t			*uec_info;
992 	ucc_fast_info_t			*uf_info;
993 	ucc_fast_private_t		*uccf;
994 	ucc_fast_t			*uf_regs;
995 	uec_t				*uec_regs;
996 	int				num_threads_tx;
997 	int				num_threads_rx;
998 	u32				utbipar;
999 	enet_interface_e		enet_interface;
1000 	u32				length;
1001 	u32				align;
1002 	qe_bd_t				*bd;
1003 	u8				*buf;
1004 	int				i;
1005 
1006 	if (!uec || !uec->uec_info) {
1007 		printf("%s: uec or uec_info not initialized\n", __FUNCTION__);
1008 		return -EINVAL;
1009 	}
1010 
1011 	uec_info = uec->uec_info;
1012 	uf_info = &(uec_info->uf_info);
1013 
1014 	/* Check if Rx BD ring len is illegal */
1015 	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
1016 		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
1017 		printf("%s: Rx BD ring length must be a multiple of 4 and at least 8.\n",
1018 			 __FUNCTION__);
1019 		return -EINVAL;
1020 	}
1021 
1022 	/* Check if Tx BD ring len is illegal */
1023 	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
1024 		printf("%s: Tx BD ring length must not be smaller than 2.\n",
1025 			 __FUNCTION__);
1026 		return -EINVAL;
1027 	}
1028 
1029 	/* Check if MRBLR is illegal */
1030 	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN  % UEC_MRBLR_ALIGNMENT)) {
1031 		printf("%s: max rx buffer length must be multiple of 128.\n",
1032 			 __FUNCTION__);
1033 		return -EINVAL;
1034 	}
1035 
1036 	/* Both Rx and Tx are stopped */
1037 	uec->grace_stopped_rx = 1;
1038 	uec->grace_stopped_tx = 1;
1039 
1040 	/* Init UCC fast */
1041 	if (ucc_fast_init(uf_info, &uccf)) {
1042 		printf("%s: failed to init ucc fast\n", __FUNCTION__);
1043 		return -ENOMEM;
1044 	}
1045 
1046 	/* Save uccf */
1047 	uec->uccf = uccf;
1048 
1049 	/* Convert the Tx threads number */
1050 	if (uec_convert_threads_num(uec_info->num_threads_tx,
1051 					 &num_threads_tx)) {
1052 		return -EINVAL;
1053 	}
1054 
1055 	/* Convert the Rx threads number */
1056 	if (uec_convert_threads_num(uec_info->num_threads_rx,
1057 					 &num_threads_rx)) {
1058 		return -EINVAL;
1059 	}
1060 
1061 	uf_regs = uccf->uf_regs;
1062 
1063 	/* The UEC registers follow the UCC fast registers */
1064 	uec_regs = (uec_t *)(&uf_regs->ucc_eth);
1065 
1066 	/* Save the UEC register pointer to UEC private struct */
1067 	uec->uec_regs = uec_regs;
1068 
1069 	/* Init UPSMR, enable hardware statistics (UCC) */
1070 	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);
1071 
1072 	/* Init MACCFG1, flow control disable, disable Tx and Rx */
1073 	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);
1074 
1075 	/* Init MACCFG2, length check, MAC PAD and CRC enable */
1076 	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);
1077 
1078 	/* Setup MAC interface mode */
1079 	uec_set_mac_if_mode(uec, uec_info->enet_interface);
1080 
1081 	/* Setup MII management base */
1082 #ifndef CONFIG_eTSEC_MDIO_BUS
1083 	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
1084 #else
1085 	uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
1086 #endif
1087 
1088 	/* Setup MII master clock source */
1089 	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);
1090 
1091 	/* Setup UTBIPAR */
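	/* UTBIPAR holds the MDIO address of the UCC's internal TBI PHY.  For
	 * TBI/RTBI it is derived from the external PHY address; otherwise an
	 * address above the usual range (0x10 + ucc_num) is used, presumably
	 * so that it never collides with the real PHY. */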
1092 	utbipar = in_be32(&uec_regs->utbipar);
1093 	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1094 	enet_interface = uec->uec_info->enet_interface;
1095 	if (enet_interface == ENET_1000_TBI ||
1096 		 enet_interface == ENET_1000_RTBI) {
1097 		utbipar |=  (uec_info->phy_address + uec_info->uf_info.ucc_num)
1098 						 << UTBIPAR_PHY_ADDRESS_SHIFT;
1099 	} else {
1100 		utbipar |=  (0x10 + uec_info->uf_info.ucc_num)
1101 						 << UTBIPAR_PHY_ADDRESS_SHIFT;
1102 	}
1103 
1104 	out_be32(&uec_regs->utbipar, utbipar);
1105 
1106 	/* Allocate Tx BDs */
1107 	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
1108 		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
1109 		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
1110 	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
1111 		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
1112 		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
1113 	}
1114 
1115 	align = UEC_TX_BD_RING_ALIGNMENT;
1116 	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
1117 	if (uec->tx_bd_ring_offset == 0)
1118 		return -ENOMEM;
1119 	uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
1120 					 & ~(align - 1));
1121 
1122 	/* Zero all of Tx BDs */
1123 	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);
1124 
1125 	/* Allocate Rx BDs */
1126 	length = uec_info->rx_bd_ring_len * SIZEOFBD;
1127 	align = UEC_RX_BD_RING_ALIGNMENT;
1128 	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
1129 	if (uec->rx_bd_ring_offset == 0)
1130 		return -ENOMEM;
1131 	uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
1132 					 & ~(align - 1));
1133 
1134 	/* Zero all of Rx BDs */
1135 	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);
1136 
1137 	/* Allocate Rx buffer */
1138 	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
1139 	align = UEC_RX_DATA_BUF_ALIGNMENT;
1140 	uec->rx_buf_offset = (u32)malloc(length + align);
1141 	if (uec->rx_buf_offset == 0)
1142 		return -ENOMEM;
1143 	uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
1144 					 & ~(align - 1));
1145 
1146 	/* Zero all of the Rx buffer */
1147 	memset((void *)(uec->rx_buf_offset), 0, length + align);
1148 
1149 	/* Init TxBD ring */
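	/* All Tx BDs start out empty and owned by software; the WRAP bit on
	 * the last BD makes the controller cycle back to the start of the
	 * ring. */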
1150 	bd = (qe_bd_t *)uec->p_tx_bd_ring;
1151 	uec->txBd = bd;
1152 
1153 	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
1154 		BD_DATA_CLEAR(bd);
1155 		BD_STATUS_SET(bd, 0);
1156 		BD_LENGTH_SET(bd, 0);
1157 		bd ++;
1158 	}
1159 	BD_STATUS_SET((--bd), TxBD_WRAP);
1160 
1161 	/* Init RxBD ring */
1162 	bd = (qe_bd_t *)uec->p_rx_bd_ring;
1163 	uec->rxBd = bd;
1164 	buf = uec->p_rx_buf;
1165 	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
1166 		BD_DATA_SET(bd, buf);
1167 		BD_LENGTH_SET(bd, 0);
1168 		BD_STATUS_SET(bd, RxBD_EMPTY);
1169 		buf += MAX_RXBUF_LEN;
1170 		bd ++;
1171 	}
1172 	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);
1173 
1174 	/* Init global Tx parameter RAM */
1175 	uec_init_tx_parameter(uec, num_threads_tx);
1176 
1177 	/* Init global Rx parameter RAM */
1178 	uec_init_rx_parameter(uec, num_threads_rx);
1179 
1180 	/* Init ethernet Tx and Rx parameter command */
1181 	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
1182 					 num_threads_rx)) {
1183 		printf("%s issue init enet cmd failed\n", __FUNCTION__);
1184 		return -ENOMEM;
1185 	}
1186 
1187 	return 0;
1188 }
1189 
1190 static int uec_init(struct eth_device* dev, bd_t *bd)
1191 {
1192 	uec_private_t		*uec;
1193 	int			err, i;
1194 	struct phy_info         *curphy;
1195 
1196 	uec = (uec_private_t *)dev->priv;
1197 
1198 	if (uec->the_first_run == 0) {
1199 		err = init_phy(dev);
1200 		if (err) {
1201 			printf("%s: Cannot initialize PHY, aborting.\n",
1202 			       dev->name);
1203 			return err;
1204 		}
1205 
1206 		curphy = uec->mii_info->phyinfo;
1207 
1208 		if (curphy->config_aneg) {
1209 			err = curphy->config_aneg(uec->mii_info);
1210 			if (err) {
1211 				printf("%s: Can't negotiate PHY\n", dev->name);
1212 				return err;
1213 			}
1214 		}
1215 
1216 		/* Give PHYs up to 5 sec to report a link */
1217 		i = 50;
1218 		do {
1219 			err = curphy->read_status(uec->mii_info);
1220 			udelay(100000);
1221 		} while ((i-- > 0) && (err || !uec->mii_info->link));
1222 
1223 		if (err || i <= 0)
1224 			printf("warning: %s: timeout on PHY link\n", dev->name);
1225 
1226 		uec->the_first_run = 1;
1227 	}
1228 
1229 	/* Set up the MAC address */
1230 	if (dev->enetaddr[0] & 0x01) {
1231 		printf("%s: MAC address is a multicast address\n",
1232 			 __FUNCTION__);
1233 		return -1;
1234 	}
1235 	uec_set_mac_address(uec, dev->enetaddr);
1236 
1237 
1238 	err = uec_open(uec, COMM_DIR_RX_AND_TX);
1239 	if (err) {
1240 		printf("%s: cannot enable UEC device\n", dev->name);
1241 		return -1;
1242 	}
1243 
1244 	phy_change(dev);
1245 
1246 	return (uec->mii_info->link ? 0 : -1);
1247 }
1248 
1249 static void uec_halt(struct eth_device* dev)
1250 {
1251 	uec_private_t	*uec = (uec_private_t *)dev->priv;
1252 	uec_stop(uec, COMM_DIR_RX_AND_TX);
1253 }
1254 
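/*
 * Polled transmit: point the current TxBD at the caller's buffer, mark it
 * READY|LAST, kick the UCC with transmit-on-demand and then busy-wait
 * (bounded) until the controller clears the READY bit again.
 */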
1255 static int uec_send(struct eth_device* dev, volatile void *buf, int len)
1256 {
1257 	uec_private_t		*uec;
1258 	ucc_fast_private_t	*uccf;
1259 	volatile qe_bd_t	*bd;
1260 	u16			status;
1261 	int			i;
1262 	int			result = 0;
1263 
1264 	uec = (uec_private_t *)dev->priv;
1265 	uccf = uec->uccf;
1266 	bd = uec->txBd;
1267 
1268 	/* Find an empty TxBD */
1269 	for (i = 0; bd->status & TxBD_READY; i++) {
1270 		if (i > 0x100000) {
1271 			printf("%s: tx buffer not ready\n", dev->name);
1272 			return result;
1273 		}
1274 	}
1275 
1276 	/* Init TxBD */
1277 	BD_DATA_SET(bd, buf);
1278 	BD_LENGTH_SET(bd, len);
1279 	status = bd->status;
1280 	status &= BD_WRAP;
1281 	status |= (TxBD_READY | TxBD_LAST);
1282 	BD_STATUS_SET(bd, status);
1283 
1284 	/* Tell UCC to transmit the buffer */
1285 	ucc_fast_transmit_on_demand(uccf);
1286 
1287 	/* Wait for buffer to be transmitted */
1288 	for (i = 0; bd->status & TxBD_READY; i++) {
1289 		if (i > 0x100000) {
1290 			printf("%s: tx error\n", dev->name);
1291 			return result;
1292 		}
1293 	}
1294 
1295 	/* OK, the buffer has been transmitted */
1296 	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
1297 	uec->txBd = bd;
1298 	result = 1;
1299 
1300 	return result;
1301 }
1302 
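/*
 * Polled receive: drain every filled RxBD, hand good frames to NetReceive()
 * and then mark each BD empty again so the controller can reuse it.
 */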
1303 static int uec_recv(struct eth_device* dev)
1304 {
1305 	uec_private_t		*uec = dev->priv;
1306 	volatile qe_bd_t	*bd;
1307 	u16			status;
1308 	u16			len;
1309 	u8			*data;
1310 
1311 	bd = uec->rxBd;
1312 	status = bd->status;
1313 
1314 	while (!(status & RxBD_EMPTY)) {
1315 		if (!(status & RxBD_ERROR)) {
1316 			data = BD_DATA(bd);
1317 			len = BD_LENGTH(bd);
1318 			NetReceive(data, len);
1319 		} else {
1320 			printf("%s: Rx error\n", dev->name);
1321 		}
1322 		status &= BD_CLEAN;
1323 		BD_LENGTH_SET(bd, 0);
1324 		BD_STATUS_SET(bd, status | RxBD_EMPTY);
1325 		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
1326 		status = bd->status;
1327 	}
1328 	uec->rxBd = bd;
1329 
1330 	return 1;
1331 }
1332 
1333 int uec_initialize(int index)
1334 {
1335 	struct eth_device	*dev;
1336 	int			i;
1337 	uec_private_t		*uec;
1338 	uec_info_t		*uec_info;
1339 	int			err;
1340 
1341 	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
1342 	if (!dev)
1343 		return 0;
1344 	memset(dev, 0, sizeof(struct eth_device));
1345 
1346 	/* Allocate the UEC private struct */
1347 	uec = (uec_private_t *)malloc(sizeof(uec_private_t));
1348 	if (!uec) {
1349 		return -ENOMEM;
1350 	}
1351 	memset(uec, 0, sizeof(uec_private_t));
1352 
1353 	/* Init UEC private struct; the settings come from the board config header */
1354 	uec_info = NULL;
1355 	if (index == 0) {
1356 #ifdef CONFIG_UEC_ETH1
1357 		uec_info = &eth1_uec_info;
1358 #endif
1359 	} else if (index == 1) {
1360 #ifdef CONFIG_UEC_ETH2
1361 		uec_info = &eth2_uec_info;
1362 #endif
1363 	} else if (index == 2) {
1364 #ifdef CONFIG_UEC_ETH3
1365 		uec_info = &eth3_uec_info;
1366 #endif
1367 	} else if (index == 3) {
1368 #ifdef CONFIG_UEC_ETH4
1369 		uec_info = &eth4_uec_info;
1370 #endif
1371 	} else {
1372 		printf("%s: index is illegal.\n", __FUNCTION__);
1373 		return -EINVAL;
1374 	}
1375 
1376 	devlist[index] = dev;
1377 
1378 	uec->uec_info = uec_info;
1379 
1380 	sprintf(dev->name, "FSL UEC%d", index);
1381 	dev->iobase = 0;
1382 	dev->priv = (void *)uec;
1383 	dev->init = uec_init;
1384 	dev->halt = uec_halt;
1385 	dev->send = uec_send;
1386 	dev->recv = uec_recv;
1387 
1388 	/* Clear the ethernet address */
1389 	for (i = 0; i < 6; i++)
1390 		dev->enetaddr[i] = 0;
1391 
1392 	eth_register(dev);
1393 
1394 	err = uec_startup(uec);
1395 	if (err) {
1396 		printf("%s: Cannot configure net device, aborting.\n", dev->name);
1397 		return err;
1398 	}
1399 
1400 #if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
1401 	&& !defined(BITBANGMII)
1402 	miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write);
1403 #endif
1404 
1405 	return 1;
1406 }
1407
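/*
 * Typical usage (a sketch; the exact call site depends on the board port):
 * the board or CPU ethernet setup code is expected to call
 * uec_initialize(index) once for every UEC enabled via CONFIG_UEC_ETHx,
 * e.g.
 *
 *	uec_initialize(0);
 * #ifdef CONFIG_UEC_ETH2
 *	uec_initialize(1);
 * #endif
 *
 * uec_initialize() registers the device with the network core via
 * eth_register(), so nothing further is required from the caller.
 */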