xref: /openbmc/u-boot/drivers/qe/uec.c (revision e100a3d5)
1  /*
2   * Copyright (C) 2006-2011 Freescale Semiconductor, Inc.
3   *
4   * Dave Liu <daveliu@freescale.com>
5   *
6   * This program is free software; you can redistribute it and/or
7   * modify it under the terms of the GNU General Public License as
8   * published by the Free Software Foundation; either version 2 of
9   * the License, or (at your option) any later version.
10   *
11   * This program is distributed in the hope that it will be useful,
12   * but WITHOUT ANY WARRANTY; without even the implied warranty of
13   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   * GNU General Public License for more details.
15   *
16   * You should have received a copy of the GNU General Public License
17   * along with this program; if not, write to the Free Software
18   * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
19   * MA 02111-1307 USA
20   */
21  
22  #include "common.h"
23  #include "net.h"
24  #include "malloc.h"
25  #include "asm/errno.h"
26  #include "asm/io.h"
27  #include "asm/immap_qe.h"
28  #include "qe.h"
29  #include "uccf.h"
30  #include "uec.h"
31  #include "uec_phy.h"
32  #include "miiphy.h"
33  #include <phy.h>
34  
35  /* Default UTBIPAR SMI address */
36  #ifndef CONFIG_UTBIPAR_INIT_TBIPA
37  #define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
38  #endif
39  
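/*
 * Per-controller configuration table: one STD_UEC_INFO() entry for each
 * CONFIG_UEC_ETHn the board configuration enables.  uec_standard_init()
 * at the bottom of this file registers every entry in this table.
 */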
40  static uec_info_t uec_info[] = {
41  #ifdef CONFIG_UEC_ETH1
42  	STD_UEC_INFO(1),	/* UEC1 */
43  #endif
44  #ifdef CONFIG_UEC_ETH2
45  	STD_UEC_INFO(2),	/* UEC2 */
46  #endif
47  #ifdef CONFIG_UEC_ETH3
48  	STD_UEC_INFO(3),	/* UEC3 */
49  #endif
50  #ifdef CONFIG_UEC_ETH4
51  	STD_UEC_INFO(4),	/* UEC4 */
52  #endif
53  #ifdef CONFIG_UEC_ETH5
54  	STD_UEC_INFO(5),	/* UEC5 */
55  #endif
56  #ifdef CONFIG_UEC_ETH6
57  	STD_UEC_INFO(6),	/* UEC6 */
58  #endif
59  #ifdef CONFIG_UEC_ETH7
60  	STD_UEC_INFO(7),	/* UEC7 */
61  #endif
62  #ifdef CONFIG_UEC_ETH8
63  	STD_UEC_INFO(8),	/* UEC8 */
64  #endif
65  };
66  
67  #define MAXCONTROLLERS	(8)
68  
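/*
 * Registered UEC devices, indexed by UCC number.  uec_initialize() fills
 * this table, and the CONFIG_MII helpers below use it to map a device
 * name back to its eth_device for PHY register accesses.
 */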
69  static struct eth_device *devlist[MAXCONTROLLERS];
70  
71  static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
72  {
73  	uec_t		*uec_regs;
74  	u32		maccfg1;
75  
76  	if (!uec) {
77  		printf("%s: uec not initialized\n", __FUNCTION__);
78  		return -EINVAL;
79  	}
80  	uec_regs = uec->uec_regs;
81  
82  	maccfg1 = in_be32(&uec_regs->maccfg1);
83  
84  	if (mode & COMM_DIR_TX)	{
85  		maccfg1 |= MACCFG1_ENABLE_TX;
86  		out_be32(&uec_regs->maccfg1, maccfg1);
87  		uec->mac_tx_enabled = 1;
88  	}
89  
90  	if (mode & COMM_DIR_RX)	{
91  		maccfg1 |= MACCFG1_ENABLE_RX;
92  		out_be32(&uec_regs->maccfg1, maccfg1);
93  		uec->mac_rx_enabled = 1;
94  	}
95  
96  	return 0;
97  }
98  
99  static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
100  {
101  	uec_t		*uec_regs;
102  	u32		maccfg1;
103  
104  	if (!uec) {
105  		printf("%s: uec not initialized\n", __FUNCTION__);
106  		return -EINVAL;
107  	}
108  	uec_regs = uec->uec_regs;
109  
110  	maccfg1 = in_be32(&uec_regs->maccfg1);
111  
112  	if (mode & COMM_DIR_TX)	{
113  		maccfg1 &= ~MACCFG1_ENABLE_TX;
114  		out_be32(&uec_regs->maccfg1, maccfg1);
115  		uec->mac_tx_enabled = 0;
116  	}
117  
118  	if (mode & COMM_DIR_RX)	{
119  		maccfg1 &= ~MACCFG1_ENABLE_RX;
120  		out_be32(&uec_regs->maccfg1, maccfg1);
121  		uec->mac_rx_enabled = 0;
122  	}
123  
124  	return 0;
125  }
126  
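/*
 * Gracefully stop the transmitter: clear any stale GRA event, issue the
 * QE_GRACEFUL_STOP_TX host command and busy-wait until the UCC raises
 * the GRA event again.  Note that the wait loop has no timeout, so it
 * relies on the QE firmware acknowledging the command.
 */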
127  static int uec_graceful_stop_tx(uec_private_t *uec)
128  {
129  	ucc_fast_t		*uf_regs;
130  	u32			cecr_subblock;
131  	u32			ucce;
132  
133  	if (!uec || !uec->uccf) {
134  		printf("%s: No handle passed.\n", __FUNCTION__);
135  		return -EINVAL;
136  	}
137  
138  	uf_regs = uec->uccf->uf_regs;
139  
140  	/* Clear the grace stop event */
141  	out_be32(&uf_regs->ucce, UCCE_GRA);
142  
143  	/* Issue host command */
144  	cecr_subblock =
145  		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
146  	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
147  			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
148  
149  	/* Wait for command to complete */
150  	do {
151  		ucce = in_be32(&uf_regs->ucce);
152  	} while (!(ucce & UCCE_GRA));
153  
154  	uec->grace_stopped_tx = 1;
155  
156  	return 0;
157  }
158  
159  static int uec_graceful_stop_rx(uec_private_t *uec)
160  {
161  	u32		cecr_subblock;
162  	u8		ack;
163  
164  	if (!uec) {
165  		printf("%s: No handle passed.\n", __FUNCTION__);
166  		return -EINVAL;
167  	}
168  
169  	if (!uec->p_rx_glbl_pram) {
170  		printf("%s: Rx global parameter RAM not initialized\n", __FUNCTION__);
171  		return -EINVAL;
172  	}
173  
174  	/* Clear acknowledge bit */
175  	ack = uec->p_rx_glbl_pram->rxgstpack;
176  	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
177  	uec->p_rx_glbl_pram->rxgstpack = ack;
178  
179  	/* Keep issuing cmd and checking ack bit until it is asserted */
180  	do {
181  		/* Issue host command */
182  		cecr_subblock =
183  		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
184  		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
185  				 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
186  		ack = uec->p_rx_glbl_pram->rxgstpack;
187  	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));
188  
189  	uec->grace_stopped_rx = 1;
190  
191  	return 0;
192  }
193  
194  static int uec_restart_tx(uec_private_t *uec)
195  {
196  	u32		cecr_subblock;
197  
198  	if (!uec || !uec->uec_info) {
199  		printf("%s: No handle passed.\n", __FUNCTION__);
200  		return -EINVAL;
201  	}
202  
203  	cecr_subblock =
204  	 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
205  	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
206  			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
207  
208  	uec->grace_stopped_tx = 0;
209  
210  	return 0;
211  }
212  
213  static int uec_restart_rx(uec_private_t *uec)
214  {
215  	u32		cecr_subblock;
216  
217  	if (!uec || !uec->uec_info) {
218  		printf("%s: No handle passed.\n", __FUNCTION__);
219  		return -EINVAL;
220  	}
221  
222  	cecr_subblock =
223  	 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
224  	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
225  			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
226  
227  	uec->grace_stopped_rx = 0;
228  
229  	return 0;
230  }
231  
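/*
 * Bring the controller up for the requested direction(s): enable the
 * MAC, enable the UCC fast controller and restart any RISC threads that
 * were previously stopped gracefully.
 */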
232  static int uec_open(uec_private_t *uec, comm_dir_e mode)
233  {
234  	ucc_fast_private_t	*uccf;
235  
236  	if (!uec || !uec->uccf) {
237  		printf("%s: No handle passed.\n", __FUNCTION__);
238  		return -EINVAL;
239  	}
240  	uccf = uec->uccf;
241  
242  	/* check if the UCC number is in range. */
243  	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
244  		printf("%s: ucc_num out of range.\n", __FUNCTION__);
245  		return -EINVAL;
246  	}
247  
248  	/* Enable MAC */
249  	uec_mac_enable(uec, mode);
250  
251  	/* Enable UCC fast */
252  	ucc_fast_enable(uccf, mode);
253  
254  	/* RISC microcode start */
255  	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
256  		uec_restart_tx(uec);
257  	}
258  	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
259  		uec_restart_rx(uec);
260  	}
261  
262  	return 0;
263  }
264  
265  static int uec_stop(uec_private_t *uec, comm_dir_e mode)
266  {
267  	if (!uec || !uec->uccf) {
268  		printf("%s: No handle passed.\n", __FUNCTION__);
269  		return -EINVAL;
270  	}
271  
272  	/* check if the UCC number is in range. */
273  	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
274  		printf("%s: ucc_num out of range.\n", __FUNCTION__);
275  		return -EINVAL;
276  	}
277  	/* Stop any transmissions */
278  	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
279  		uec_graceful_stop_tx(uec);
280  	}
281  	/* Stop any receptions */
282  	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
283  		uec_graceful_stop_rx(uec);
284  	}
285  
286  	/* Disable the UCC fast */
287  	ucc_fast_disable(uec->uccf, mode);
288  
289  	/* Disable the MAC */
290  	uec_mac_disable(uec, mode);
291  
292  	return 0;
293  }
294  
295  static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
296  {
297  	uec_t		*uec_regs;
298  	u32		maccfg2;
299  
300  	if (!uec) {
301  		printf("%s: uec not initialized\n", __FUNCTION__);
302  		return -EINVAL;
303  	}
304  	uec_regs = uec->uec_regs;
305  
306  	if (duplex == DUPLEX_HALF) {
307  		maccfg2 = in_be32(&uec_regs->maccfg2);
308  		maccfg2 &= ~MACCFG2_FDX;
309  		out_be32(&uec_regs->maccfg2, maccfg2);
310  	}
311  
312  	if (duplex == DUPLEX_FULL) {
313  		maccfg2 = in_be32(&uec_regs->maccfg2);
314  		maccfg2 |= MACCFG2_FDX;
315  		out_be32(&uec_regs->maccfg2, maccfg2);
316  	}
317  
318  	return 0;
319  }
320  
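/*
 * Program MACCFG2 (nibble vs. byte interface width) and the UPSMR mode
 * bits (RPM/TBIM/R10M/RMM/SGMM) to match the PHY interface type and the
 * negotiated speed.  Unsupported speed/interface combinations return
 * -EINVAL and leave both registers untouched.
 */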
321  static int uec_set_mac_if_mode(uec_private_t *uec,
322  		phy_interface_t if_mode, int speed)
323  {
324  	phy_interface_t		enet_if_mode;
325  	uec_t			*uec_regs;
326  	u32			upsmr;
327  	u32			maccfg2;
328  
329  	if (!uec) {
330  		printf("%s: uec not initialized\n", __FUNCTION__);
331  		return -EINVAL;
332  	}
333  
334  	uec_regs = uec->uec_regs;
335  	enet_if_mode = if_mode;
336  
337  	maccfg2 = in_be32(&uec_regs->maccfg2);
338  	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
339  
340  	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
341  	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);
342  
343  	switch (speed) {
344  		case SPEED_10:
345  			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
346  			switch (enet_if_mode) {
347  				case PHY_INTERFACE_MODE_MII:
348  					break;
349  				case PHY_INTERFACE_MODE_RGMII:
350  					upsmr |= (UPSMR_RPM | UPSMR_R10M);
351  					break;
352  				case PHY_INTERFACE_MODE_RMII:
353  					upsmr |= (UPSMR_R10M | UPSMR_RMM);
354  					break;
355  				default:
356  					return -EINVAL;
357  					break;
358  			}
359  			break;
360  		case SPEED_100:
361  			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
362  			switch (enet_if_mode) {
363  				case PHY_INTERFACE_MODE_MII:
364  					break;
365  				case PHY_INTERFACE_MODE_RGMII:
366  					upsmr |= UPSMR_RPM;
367  					break;
368  				case PHY_INTERFACE_MODE_RMII:
369  					upsmr |= UPSMR_RMM;
370  					break;
371  				default:
372  					return -EINVAL;
373  					break;
374  			}
375  			break;
376  		case SPEED_1000:
377  			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
378  			switch (enet_if_mode) {
379  				case PHY_INTERFACE_MODE_GMII:
380  					break;
381  				case PHY_INTERFACE_MODE_TBI:
382  					upsmr |= UPSMR_TBIM;
383  					break;
384  				case PHY_INTERFACE_MODE_RTBI:
385  					upsmr |= (UPSMR_RPM | UPSMR_TBIM);
386  					break;
387  				case PHY_INTERFACE_MODE_RGMII_RXID:
388  				case PHY_INTERFACE_MODE_RGMII_TXID:
389  				case PHY_INTERFACE_MODE_RGMII_ID:
390  				case PHY_INTERFACE_MODE_RGMII:
391  					upsmr |= UPSMR_RPM;
392  					break;
393  				case PHY_INTERFACE_MODE_SGMII:
394  					upsmr |= UPSMR_SGMM;
395  					break;
396  				default:
397  					return -EINVAL;
398  					break;
399  			}
400  			break;
401  		default:
402  			return -EINVAL;
403  			break;
404  	}
405  
406  	out_be32(&uec_regs->maccfg2, maccfg2);
407  	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);
408  
409  	return 0;
410  }
411  
412  static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
413  {
414  	int		timeout = 0x1000;
415  	u32		miimcfg = 0;
416  
417  	miimcfg = in_be32(&uec_mii_regs->miimcfg);
418  	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
419  	out_be32(&uec_mii_regs->miimcfg, miimcfg);
420  
421  	/* Wait until the bus is free */
422  	while ((in_be32(&uec_mii_regs->miimind) & MIIMIND_BUSY) && timeout--);
423  	if (timeout <= 0) {
424  		printf("%s: The MII Bus is stuck!\n", __FUNCTION__);
425  		return -ETIMEDOUT;
426  	}
427  
428  	return 0;
429  }
430  
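/*
 * Allocate and fill the uec_mii_info structure, configure the MII
 * management interface and probe for a PHY.  If anything fails after
 * the allocation, mii_info is freed again before returning.
 */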
431  static int init_phy(struct eth_device *dev)
432  {
433  	uec_private_t		*uec;
434  	uec_mii_t		*umii_regs;
435  	struct uec_mii_info	*mii_info;
436  	struct phy_info		*curphy;
437  	int			err;
438  
439  	uec = (uec_private_t *)dev->priv;
440  	umii_regs = uec->uec_mii_regs;
441  
442  	uec->oldlink = 0;
443  	uec->oldspeed = 0;
444  	uec->oldduplex = -1;
445  
446  	mii_info = malloc(sizeof(*mii_info));
447  	if (!mii_info) {
448  		printf("%s: Could not allocate mii_info\n", dev->name);
449  		return -ENOMEM;
450  	}
451  	memset(mii_info, 0, sizeof(*mii_info));
452  
453  	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
454  		mii_info->speed = SPEED_1000;
455  	} else {
456  		mii_info->speed = SPEED_100;
457  	}
458  
459  	mii_info->duplex = DUPLEX_FULL;
460  	mii_info->pause = 0;
461  	mii_info->link = 1;
462  
463  	mii_info->advertising = (ADVERTISED_10baseT_Half |
464  				ADVERTISED_10baseT_Full |
465  				ADVERTISED_100baseT_Half |
466  				ADVERTISED_100baseT_Full |
467  				ADVERTISED_1000baseT_Full);
468  	mii_info->autoneg = 1;
469  	mii_info->mii_id = uec->uec_info->phy_address;
470  	mii_info->dev = dev;
471  
472  	mii_info->mdio_read = &uec_read_phy_reg;
473  	mii_info->mdio_write = &uec_write_phy_reg;
474  
475  	uec->mii_info = mii_info;
476  
477  	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);
478  
479  	if (init_mii_management_configuration(umii_regs)) {
480  		printf("%s: The MII Bus is stuck!\n", dev->name);
481  		err = -1;
482  		goto bus_fail;
483  	}
484  
485  	/* get info for this PHY */
486  	curphy = uec_get_phy_info(uec->mii_info);
487  	if (!curphy) {
488  		printf("%s: No PHY found\n", dev->name);
489  		err = -1;
490  		goto no_phy;
491  	}
492  
493  	mii_info->phyinfo = curphy;
494  
495  	/* Run the commands which initialize the PHY */
496  	if (curphy->init) {
497  		err = curphy->init(uec->mii_info);
498  		if (err)
499  			goto phy_init_fail;
500  	}
501  
502  	return 0;
503  
504  phy_init_fail:
505  no_phy:
506  bus_fail:
507  	free(mii_info);
508  	return err;
509  }
510  
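/*
 * React to a link, speed or duplex change reported by the PHY: update
 * the MAC duplex setting, reprogram the PHY and MAC interface mode when
 * the speed changes, and log link up/down transitions.
 */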
511  static void adjust_link(struct eth_device *dev)
512  {
513  	uec_private_t		*uec = (uec_private_t *)dev->priv;
514  	struct uec_mii_info	*mii_info = uec->mii_info;
515  
516  	extern void change_phy_interface_mode(struct eth_device *dev,
517  				 phy_interface_t mode, int speed);
518  
519  	if (mii_info->link) {
520  		/* Now we make sure that we can be in full duplex mode.
521  		 * If not, we operate in half-duplex mode. */
522  		if (mii_info->duplex != uec->oldduplex) {
523  			if (!(mii_info->duplex)) {
524  				uec_set_mac_duplex(uec, DUPLEX_HALF);
525  				printf("%s: Half Duplex\n", dev->name);
526  			} else {
527  				uec_set_mac_duplex(uec, DUPLEX_FULL);
528  				printf("%s: Full Duplex\n", dev->name);
529  			}
530  			uec->oldduplex = mii_info->duplex;
531  		}
532  
533  		if (mii_info->speed != uec->oldspeed) {
534  			phy_interface_t mode =
535  				uec->uec_info->enet_interface_type;
536  			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
537  				switch (mii_info->speed) {
538  				case SPEED_1000:
539  					break;
540  				case SPEED_100:
541  					printf ("switching to rgmii 100\n");
542  					mode = PHY_INTERFACE_MODE_RGMII;
543  					break;
544  				case SPEED_10:
545  					printf ("switching to rgmii 10\n");
546  					mode = PHY_INTERFACE_MODE_RGMII;
547  					break;
548  				default:
549  					printf("%s: speed %d is not supported\n",
550  						dev->name, mii_info->speed);
551  					break;
552  				}
553  			}
554  
555  			/* change phy */
556  			change_phy_interface_mode(dev, mode, mii_info->speed);
557  			/* change the MAC interface mode */
558  			uec_set_mac_if_mode(uec, mode, mii_info->speed);
559  
560  			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
561  			uec->oldspeed = mii_info->speed;
562  		}
563  
564  		if (!uec->oldlink) {
565  			printf("%s: Link is up\n", dev->name);
566  			uec->oldlink = 1;
567  		}
568  
569  	} else { /* if (mii_info->link) */
570  		if (uec->oldlink) {
571  			printf("%s: Link is down\n", dev->name);
572  			uec->oldlink = 0;
573  			uec->oldspeed = 0;
574  			uec->oldduplex = -1;
575  		}
576  	}
577  }
578  
579  static void phy_change(struct eth_device *dev)
580  {
581  	uec_private_t	*uec = (uec_private_t *)dev->priv;
582  
583  #if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
584  	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
585  
586  	/* QE9 and QE12 need to be set for enabling QE MII management signals */
587  	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
588  	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
589  #endif
590  
591  	/* Update the link, speed, duplex */
592  	uec->mii_info->phyinfo->read_status(uec->mii_info);
593  
594  #if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
595  	/*
596  	 * QE12 is muxed with LBCTL, it needs to be released for enabling
597  	 * LBCTL signal for LBC usage.
598  	 */
599  	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
600  #endif
601  
602  	/* Adjust the interface according to speed */
603  	adjust_link(dev);
604  }
605  
606  #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
607  
608  /*
609   * Find a device index from the devlist by name
610   *
611   * Returns:
612   *  The index where the device is located, -1 on error
613   */
614  static int uec_miiphy_find_dev_by_name(const char *devname)
615  {
616  	int i;
617  
618  	for (i = 0; i < MAXCONTROLLERS; i++) {
619  		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) {
620  			break;
621  		}
622  	}
623  
624  	/* If the device cannot be found, return -1 */
625  	if (i == MAXCONTROLLERS) {
626  		debug ("%s: device %s not found in devlist\n", __FUNCTION__, devname);
627  		i = -1;
628  	}
629  
630  	return i;
631  }
632  
633  /*
634   * Read a MII PHY register.
635   *
636   * Returns:
637   *  0 on success
638   */
639  static int uec_miiphy_read(const char *devname, unsigned char addr,
640  			    unsigned char reg, unsigned short *value)
641  {
642  	int devindex = 0;
643  
644  	if (devname == NULL || value == NULL) {
645  		debug("%s: NULL pointer given\n", __FUNCTION__);
646  	} else {
647  		devindex = uec_miiphy_find_dev_by_name(devname);
648  		if (devindex >= 0) {
649  			*value = uec_read_phy_reg(devlist[devindex], addr, reg);
650  		}
651  	}
652  	return 0;
653  }
654  
655  /*
656   * Write a MII PHY register.
657   *
658   * Returns:
659   *  0 on success
660   */
661  static int uec_miiphy_write(const char *devname, unsigned char addr,
662  			     unsigned char reg, unsigned short value)
663  {
664  	int devindex = 0;
665  
666  	if (devname == NULL) {
667  		debug("%s: NULL pointer given\n", __FUNCTION__);
668  	} else {
669  		devindex = uec_miiphy_find_dev_by_name(devname);
670  		if (devindex >= 0) {
671  			uec_write_phy_reg(devlist[devindex], addr, reg, value);
672  		}
673  	}
674  	return 0;
675  }
676  #endif
677  
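/*
 * Write the station address into MACSTNADDR1/2.  The two registers hold
 * the six address bytes in reversed order; see the worked example in
 * the comment below.
 */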
678  static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
679  {
680  	uec_t		*uec_regs;
681  	u32		mac_addr1;
682  	u32		mac_addr2;
683  
684  	if (!uec) {
685  		printf("%s: uec not initialized\n", __FUNCTION__);
686  		return -EINVAL;
687  	}
688  
689  	uec_regs = uec->uec_regs;
690  
691  	/* For a station address of 0x12345678ABCD, write MACSTNADDR1 as
692  	 * 0xCDAB7856 and MACSTNADDR2 as 0x34120000 (address bytes in
693  	 * reversed order). */
694  
695  	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
696  			(mac_addr[3] << 8)  | (mac_addr[2]);
697  	out_be32(&uec_regs->macstnaddr1, mac_addr1);
698  
699  	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
700  	out_be32(&uec_regs->macstnaddr2, mac_addr2);
701  
702  	return 0;
703  }
704  
705  static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
706  					 int *threads_num_ret)
707  {
708  	int	num_threads_numerica;
709  
710  	switch (threads_num) {
711  		case UEC_NUM_OF_THREADS_1:
712  			num_threads_numerica = 1;
713  			break;
714  		case UEC_NUM_OF_THREADS_2:
715  			num_threads_numerica = 2;
716  			break;
717  		case UEC_NUM_OF_THREADS_4:
718  			num_threads_numerica = 4;
719  			break;
720  		case UEC_NUM_OF_THREADS_6:
721  			num_threads_numerica = 6;
722  			break;
723  		case UEC_NUM_OF_THREADS_8:
724  			num_threads_numerica = 8;
725  			break;
726  		default:
727  			printf("%s: Bad number of threads value.\n",
728  				 __FUNCTION__);
729  			return -EINVAL;
730  	}
731  
732  	*threads_num_ret = num_threads_numerica;
733  
734  	return 0;
735  }
736  
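/*
 * Allocate and initialize the global Tx parameter RAM page in MURAM:
 * a single send queue descriptor pointing at the Tx BD ring, scheduler
 * and Tx RMON support disabled, and thread data for num_threads_tx
 * Tx threads.
 */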
737  static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
738  {
739  	uec_info_t	*uec_info;
740  	u32		end_bd;
741  	u8		bmrx = 0;
742  	int		i;
743  
744  	uec_info = uec->uec_info;
745  
746  	/* Alloc global Tx parameter RAM page */
747  	uec->tx_glbl_pram_offset = qe_muram_alloc(
748  				sizeof(uec_tx_global_pram_t),
749  				 UEC_TX_GLOBAL_PRAM_ALIGNMENT);
750  	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
751  				qe_muram_addr(uec->tx_glbl_pram_offset);
752  
753  	/* Zero the global Tx parameter RAM */
754  	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));
755  
756  	/* Init global Tx parameter RAM */
757  
758  	/* TEMODER, RMON statistics disable, one Tx queue */
759  	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);
760  
761  	/* SQPTR */
762  	uec->send_q_mem_reg_offset = qe_muram_alloc(
763  				sizeof(uec_send_queue_qd_t),
764  				 UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
765  	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
766  				qe_muram_addr(uec->send_q_mem_reg_offset);
767  	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);
768  
769  	/* Setup the table with TxBDs ring */
770  	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
771  					 * SIZEOFBD;
772  	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
773  				 (u32)(uec->p_tx_bd_ring));
774  	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
775  						 end_bd);
776  
777  	/* Scheduler Base Pointer, we have only one Tx queue, no need it */
778  	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);
779  
780  	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
781  	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);
782  
783  	/* TSTATE, global snooping, big endian, the CSB bus selected */
784  	bmrx = BMR_INIT_VALUE;
785  	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));
786  
787  	/* IPH_Offset */
788  	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
789  		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
790  	}
791  
792  	/* VTAG table */
793  	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
794  		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
795  	}
796  
797  	/* TQPTR */
798  	uec->thread_dat_tx_offset = qe_muram_alloc(
799  		num_threads_tx * sizeof(uec_thread_data_tx_t) +
800  		 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);
801  
802  	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
803  				qe_muram_addr(uec->thread_dat_tx_offset);
804  	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
805  }
806  
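/*
 * Allocate and initialize the global Rx parameter RAM page in MURAM:
 * thread data for num_threads_rx Rx threads, one Rx BD queue entry
 * pointing at the Rx BD ring, the frame and DMA length limits, and a
 * cleared address filtering hash table.
 */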
807  static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
808  {
809  	u8	bmrx = 0;
810  	int	i;
811  	uec_82xx_address_filtering_pram_t	*p_af_pram;
812  
813  	/* Allocate global Rx parameter RAM page */
814  	uec->rx_glbl_pram_offset = qe_muram_alloc(
815  		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
816  	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
817  				qe_muram_addr(uec->rx_glbl_pram_offset);
818  
819  	/* Zero Global Rx parameter RAM */
820  	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));
821  
822  	/* Init global Rx parameter RAM */
823  	/* REMODER: extended feature mode disabled, VLAN disabled,
824  	 * lossless flow control disabled, receive firmware statistics
825  	 * disabled, extended address parsing disabled, one Rx queue,
826  	 * dynamic maximum/minimum frame length disabled, IP checksum
827  	 * check disabled, IP address alignment disabled.
828  	 */
829  	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);
830  
831  	/* RQPTR */
832  	uec->thread_dat_rx_offset = qe_muram_alloc(
833  			num_threads_rx * sizeof(uec_thread_data_rx_t),
834  			 UEC_THREAD_DATA_ALIGNMENT);
835  	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
836  				qe_muram_addr(uec->thread_dat_rx_offset);
837  	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);
838  
839  	/* Type_or_Len */
840  	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);
841  
842  	/* RxRMON base pointer, we don't need it */
843  	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);
844  
845  	/* IntCoalescingPTR, we don't need it, no interrupt */
846  	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);
847  
848  	/* RSTATE, global snooping, big endian, the CSB bus selected */
849  	bmrx = BMR_INIT_VALUE;
850  	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);
851  
852  	/* MRBLR */
853  	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);
854  
855  	/* RBDQPTR */
856  	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
857  				sizeof(uec_rx_bd_queues_entry_t) + \
858  				sizeof(uec_rx_prefetched_bds_t),
859  				 UEC_RX_BD_QUEUES_ALIGNMENT);
860  	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
861  				qe_muram_addr(uec->rx_bd_qs_tbl_offset);
862  
863  	/* Zero it */
864  	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
865  					sizeof(uec_rx_prefetched_bds_t));
866  	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
867  	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
868  		 (u32)uec->p_rx_bd_ring);
869  
870  	/* MFLR */
871  	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
872  	/* MINFLR */
873  	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
874  	/* MAXD1 */
875  	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
876  	/* MAXD2 */
877  	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
878  	/* ECAM_PTR */
879  	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
880  	/* L2QT */
881  	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
882  	/* L3QT */
883  	for (i = 0; i < 8; i++)	{
884  		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
885  	}
886  
887  	/* VLAN_TYPE */
888  	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
889  	/* TCI */
890  	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);
891  
892  	/* Clear PQ2 style address filtering hash table */
893  	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
894  			uec->p_rx_glbl_pram->addressfiltering;
895  
896  	p_af_pram->iaddr_h = 0;
897  	p_af_pram->iaddr_l = 0;
898  	p_af_pram->gaddr_h = 0;
899  	p_af_pram->gaddr_l = 0;
900  }
901  
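/*
 * Build the INIT_TX_RX command parameter page: the magic reserved
 * values, pointers to the global Rx/Tx parameter RAM pages and one
 * snum/parameter-RAM entry per RISC thread, then issue the
 * QE_INIT_TX_RX host command.
 */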
902  static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
903  					 int thread_tx, int thread_rx)
904  {
905  	uec_init_cmd_pram_t		*p_init_enet_param;
906  	u32				init_enet_param_offset;
907  	uec_info_t			*uec_info;
908  	int				i;
909  	int				snum;
910  	u32				init_enet_offset;
911  	u32				entry_val;
912  	u32				command;
913  	u32				cecr_subblock;
914  
915  	uec_info = uec->uec_info;
916  
917  	/* Allocate init enet command parameter */
918  	uec->init_enet_param_offset = qe_muram_alloc(
919  					sizeof(uec_init_cmd_pram_t), 4);
920  	init_enet_param_offset = uec->init_enet_param_offset;
921  	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
922  				qe_muram_addr(uec->init_enet_param_offset);
923  
924  	/* Zero init enet command struct */
925  	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));
926  
927  	/* Init the command struct */
928  	p_init_enet_param = uec->p_init_enet_param;
929  	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
930  	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
931  	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
932  	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
933  	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
934  	p_init_enet_param->largestexternallookupkeysize = 0;
935  
936  	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
937  					 << ENET_INIT_PARAM_RGF_SHIFT;
938  	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
939  					 << ENET_INIT_PARAM_TGF_SHIFT;
940  
941  	/* Init Rx global parameter pointer */
942  	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
943  						 (u32)uec_info->risc_rx;
944  
945  	/* Init Rx threads */
946  	for (i = 0; i < (thread_rx + 1); i++) {
947  		if ((snum = qe_get_snum()) < 0) {
948  			printf("%s: cannot get snum\n", __FUNCTION__);
949  			return -ENOMEM;
950  		}
951  
952  		if (i==0) {
953  			init_enet_offset = 0;
954  		} else {
955  			init_enet_offset = qe_muram_alloc(
956  					sizeof(uec_thread_rx_pram_t),
957  					 UEC_THREAD_RX_PRAM_ALIGNMENT);
958  		}
959  
960  		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
961  				 init_enet_offset | (u32)uec_info->risc_rx;
962  		p_init_enet_param->rxthread[i] = entry_val;
963  	}
964  
965  	/* Init Tx global parameter pointer */
966  	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
967  					 (u32)uec_info->risc_tx;
968  
969  	/* Init Tx threads */
970  	for (i = 0; i < thread_tx; i++) {
971  		if ((snum = qe_get_snum()) < 0)	{
972  			printf("%s: cannot get snum\n", __FUNCTION__);
973  			return -ENOMEM;
974  		}
975  
976  		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
977  						 UEC_THREAD_TX_PRAM_ALIGNMENT);
978  
979  		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
980  				 init_enet_offset | (u32)uec_info->risc_tx;
981  		p_init_enet_param->txthread[i] = entry_val;
982  	}
983  
984  	__asm__ __volatile__("sync");
985  
986  	/* Issue QE command */
987  	command = QE_INIT_TX_RX;
988  	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
989  				uec->uec_info->uf_info.ucc_num);
990  	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
991  						 init_enet_param_offset);
992  
993  	return 0;
994  }
995  
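/*
 * One-time hardware setup for a UEC: initialize the UCC fast controller,
 * program the MAC and MII management registers, allocate and link the
 * Tx/Rx BD rings and Rx buffers, fill the global parameter RAM pages and
 * issue the INIT_TX_RX command.  Called once from uec_initialize().
 */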
996  static int uec_startup(uec_private_t *uec)
997  {
998  	uec_info_t			*uec_info;
999  	ucc_fast_info_t			*uf_info;
1000  	ucc_fast_private_t		*uccf;
1001  	ucc_fast_t			*uf_regs;
1002  	uec_t				*uec_regs;
1003  	int				num_threads_tx;
1004  	int				num_threads_rx;
1005  	u32				utbipar;
1006  	u32				length;
1007  	u32				align;
1008  	qe_bd_t				*bd;
1009  	u8				*buf;
1010  	int				i;
1011  
1012  	if (!uec || !uec->uec_info) {
1013  		printf("%s: uec or uec_info not initialized\n", __FUNCTION__);
1014  		return -EINVAL;
1015  	}
1016  
1017  	uec_info = uec->uec_info;
1018  	uf_info = &(uec_info->uf_info);
1019  
1020  	/* Check if Rx BD ring len is illegal */
1021  	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
1022  		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
1023  		printf("%s: Rx BD ring len must be a multiple of 4 and at least 8.\n",
1024  			 __FUNCTION__);
1025  		return -EINVAL;
1026  	}
1027  
1028  	/* Check if Tx BD ring len is illegal */
1029  	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
1030  		printf("%s: Tx BD ring length must not be smaller than 2.\n",
1031  			 __FUNCTION__);
1032  		return -EINVAL;
1033  	}
1034  
1035  	/* Check if MRBLR is illegal */
1036  	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN  % UEC_MRBLR_ALIGNMENT)) {
1037  		printf("%s: max rx buffer length must be a multiple of 128.\n",
1038  			 __FUNCTION__);
1039  		return -EINVAL;
1040  	}
1041  
1042  	/* Both Rx and Tx are stopped */
1043  	uec->grace_stopped_rx = 1;
1044  	uec->grace_stopped_tx = 1;
1045  
1046  	/* Init UCC fast */
1047  	if (ucc_fast_init(uf_info, &uccf)) {
1048  		printf("%s: failed to init ucc fast\n", __FUNCTION__);
1049  		return -ENOMEM;
1050  	}
1051  
1052  	/* Save uccf */
1053  	uec->uccf = uccf;
1054  
1055  	/* Convert the Tx threads number */
1056  	if (uec_convert_threads_num(uec_info->num_threads_tx,
1057  					 &num_threads_tx)) {
1058  		return -EINVAL;
1059  	}
1060  
1061  	/* Convert the Rx threads number */
1062  	if (uec_convert_threads_num(uec_info->num_threads_rx,
1063  					 &num_threads_rx)) {
1064  		return -EINVAL;
1065  	}
1066  
1067  	uf_regs = uccf->uf_regs;
1068  
1069  	/* The UEC registers follow the UCC fast registers */
1070  	uec_regs = (uec_t *)(&uf_regs->ucc_eth);
1071  
1072  	/* Save the UEC register pointer to UEC private struct */
1073  	uec->uec_regs = uec_regs;
1074  
1075  	/* Init UPSMR, enable hardware statistics (UCC) */
1076  	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);
1077  
1078  	/* Init MACCFG1, flow control disable, disable Tx and Rx */
1079  	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);
1080  
1081  	/* Init MACCFG2, length check, MAC PAD and CRC enable */
1082  	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);
1083  
1084  	/* Setup MAC interface mode */
1085  	uec_set_mac_if_mode(uec, uec_info->enet_interface_type, uec_info->speed);
1086  
1087  	/* Setup MII management base */
1088  #ifndef CONFIG_eTSEC_MDIO_BUS
1089  	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
1090  #else
1091  	uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
1092  #endif
1093  
1094  	/* Setup MII master clock source */
1095  	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);
1096  
1097  	/* Setup UTBIPAR */
1098  	utbipar = in_be32(&uec_regs->utbipar);
1099  	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1100  
1101  	/* Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
1102  	 * This frees up the remaining SMI addresses for use.
1103  	 */
1104  	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
1105  	out_be32(&uec_regs->utbipar, utbipar);
1106  
1107  	/* Configure the TBI for SGMII operation */
1108  	if ((uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII) &&
1109  	   (uec->uec_info->speed == SPEED_1000)) {
1110  		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
1111  			ENET_TBI_MII_ANA, TBIANA_SETTINGS);
1112  
1113  		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
1114  			ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1115  
1116  		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
1117  			ENET_TBI_MII_CR, TBICR_SETTINGS);
1118  	}
1119  
1120  	/* Allocate Tx BDs */
1121  	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
1122  		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
1123  		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
1124  	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
1125  		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
1126  		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
1127  	}
1128  
1129  	align = UEC_TX_BD_RING_ALIGNMENT;
1130  	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
1131  	if (uec->tx_bd_ring_offset != 0) {
1132  		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
1133  						 & ~(align - 1));
1134  	}
1135  
1136  	/* Zero all of Tx BDs */
1137  	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);
1138  
1139  	/* Allocate Rx BDs */
1140  	length = uec_info->rx_bd_ring_len * SIZEOFBD;
1141  	align = UEC_RX_BD_RING_ALIGNMENT;
1142  	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
1143  	if (uec->rx_bd_ring_offset != 0) {
1144  		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
1145  							 & ~(align - 1));
1146  	}
1147  
1148  	/* Zero all of Rx BDs */
1149  	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);
1150  
1151  	/* Allocate Rx buffer */
1152  	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
1153  	align = UEC_RX_DATA_BUF_ALIGNMENT;
1154  	uec->rx_buf_offset = (u32)malloc(length + align);
1155  	if (uec->rx_buf_offset != 0) {
1156  		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
1157  						 & ~(align - 1));
1158  	}
1159  
1160  	/* Zero all of the Rx buffer */
1161  	memset((void *)(uec->rx_buf_offset), 0, length + align);
1162  
1163  	/* Init TxBD ring */
1164  	bd = (qe_bd_t *)uec->p_tx_bd_ring;
1165  	uec->txBd = bd;
1166  
1167  	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
1168  		BD_DATA_CLEAR(bd);
1169  		BD_STATUS_SET(bd, 0);
1170  		BD_LENGTH_SET(bd, 0);
1171  		bd ++;
1172  	}
1173  	BD_STATUS_SET((--bd), TxBD_WRAP);
1174  
1175  	/* Init RxBD ring */
1176  	bd = (qe_bd_t *)uec->p_rx_bd_ring;
1177  	uec->rxBd = bd;
1178  	buf = uec->p_rx_buf;
1179  	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
1180  		BD_DATA_SET(bd, buf);
1181  		BD_LENGTH_SET(bd, 0);
1182  		BD_STATUS_SET(bd, RxBD_EMPTY);
1183  		buf += MAX_RXBUF_LEN;
1184  		bd ++;
1185  	}
1186  	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);
1187  
1188  	/* Init global Tx parameter RAM */
1189  	uec_init_tx_parameter(uec, num_threads_tx);
1190  
1191  	/* Init global Rx parameter RAM */
1192  	uec_init_rx_parameter(uec, num_threads_rx);
1193  
1194  	/* Init ethernet Tx and Rx parameter command */
1195  	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
1196  					 num_threads_rx)) {
1197  		printf("%s issue init enet cmd failed\n", __FUNCTION__);
1198  		return -ENOMEM;
1199  	}
1200  
1201  	return 0;
1202  }
1203  
1204  static int uec_init(struct eth_device* dev, bd_t *bd)
1205  {
1206  	uec_private_t		*uec;
1207  	int			err, i;
1208  	struct phy_info         *curphy;
1209  #if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
1210  	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
1211  #endif
1212  
1213  	uec = (uec_private_t *)dev->priv;
1214  
1215  	if (uec->the_first_run == 0) {
1216  #if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
1217  	/* QE9 and QE12 need to be set for enabling QE MII management signals */
1218  	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
1219  	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
1220  #endif
1221  
1222  		err = init_phy(dev);
1223  		if (err) {
1224  			printf("%s: Cannot initialize PHY, aborting.\n",
1225  			       dev->name);
1226  			return err;
1227  		}
1228  
1229  		curphy = uec->mii_info->phyinfo;
1230  
1231  		if (curphy->config_aneg) {
1232  			err = curphy->config_aneg(uec->mii_info);
1233  			if (err) {
1234  				printf("%s: Can't negotiate PHY\n", dev->name);
1235  				return err;
1236  			}
1237  		}
1238  
1239  		/* Give PHYs up to 5 sec to report a link */
1240  		i = 50;
1241  		do {
1242  			err = curphy->read_status(uec->mii_info);
1243  			if (!(((i-- > 0) && !uec->mii_info->link) || err))
1244  				break;
1245  			udelay(100000);
1246  		} while (1);
1247  
1248  #if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
1249  		/* QE12 needs to be released for enabling LBCTL signal*/
1250  		clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
1251  #endif
1252  
1253  		if (err || i <= 0)
1254  			printf("warning: %s: timeout on PHY link\n", dev->name);
1255  
1256  		adjust_link(dev);
1257  		uec->the_first_run = 1;
1258  	}
1259  
1260  	/* Set up the MAC address */
1261  	if (dev->enetaddr[0] & 0x01) {
1262  		printf("%s: MAC address is a multicast address\n",
1263  			 __FUNCTION__);
1264  		return -1;
1265  	}
1266  	uec_set_mac_address(uec, dev->enetaddr);
1267  
1268  
1269  	err = uec_open(uec, COMM_DIR_RX_AND_TX);
1270  	if (err) {
1271  		printf("%s: cannot enable UEC device\n", dev->name);
1272  		return -1;
1273  	}
1274  
1275  	phy_change(dev);
1276  
1277  	return (uec->mii_info->link ? 0 : -1);
1278  }
1279  
1280  static void uec_halt(struct eth_device* dev)
1281  {
1282  	uec_private_t	*uec = (uec_private_t *)dev->priv;
1283  	uec_stop(uec, COMM_DIR_RX_AND_TX);
1284  }
1285  
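/*
 * Polling transmit: wait for the current TxBD to become free, point it
 * at the caller's buffer, set READY | LAST, kick the UCC with
 * transmit-on-demand and busy-wait (with a bounded iteration count)
 * until the BD is released again.
 */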
1286  static int uec_send(struct eth_device *dev, void *buf, int len)
1287  {
1288  	uec_private_t		*uec;
1289  	ucc_fast_private_t	*uccf;
1290  	volatile qe_bd_t	*bd;
1291  	u16			status;
1292  	int			i;
1293  	int			result = 0;
1294  
1295  	uec = (uec_private_t *)dev->priv;
1296  	uccf = uec->uccf;
1297  	bd = uec->txBd;
1298  
1299  	/* Find an empty TxBD */
1300  	for (i = 0; bd->status & TxBD_READY; i++) {
1301  		if (i > 0x100000) {
1302  			printf("%s: tx buffer not ready\n", dev->name);
1303  			return result;
1304  		}
1305  	}
1306  
1307  	/* Init TxBD */
1308  	BD_DATA_SET(bd, buf);
1309  	BD_LENGTH_SET(bd, len);
1310  	status = bd->status;
1311  	status &= BD_WRAP;
1312  	status |= (TxBD_READY | TxBD_LAST);
1313  	BD_STATUS_SET(bd, status);
1314  
1315  	/* Tell UCC to transmit the buffer */
1316  	ucc_fast_transmit_on_demand(uccf);
1317  
1318  	/* Wait for buffer to be transmitted */
1319  	for (i = 0; bd->status & TxBD_READY; i++) {
1320  		if (i > 0x100000) {
1321  			printf("%s: tx error\n", dev->name);
1322  			return result;
1323  		}
1324  	}
1325  
1326  	/* OK, the buffer has been transmitted */
1327  	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
1328  	uec->txBd = bd;
1329  	result = 1;
1330  
1331  	return result;
1332  }
1333  
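/*
 * Polling receive: walk the RxBD ring, hand every non-empty, error-free
 * frame to NetReceive() and return each BD to the hardware marked EMPTY.
 */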
1334  static int uec_recv(struct eth_device* dev)
1335  {
1336  	uec_private_t		*uec = dev->priv;
1337  	volatile qe_bd_t	*bd;
1338  	u16			status;
1339  	u16			len;
1340  	u8			*data;
1341  
1342  	bd = uec->rxBd;
1343  	status = bd->status;
1344  
1345  	while (!(status & RxBD_EMPTY)) {
1346  		if (!(status & RxBD_ERROR)) {
1347  			data = BD_DATA(bd);
1348  			len = BD_LENGTH(bd);
1349  			NetReceive(data, len);
1350  		} else {
1351  			printf("%s: Rx error\n", dev->name);
1352  		}
1353  		status &= BD_CLEAN;
1354  		BD_LENGTH_SET(bd, 0);
1355  		BD_STATUS_SET(bd, status | RxBD_EMPTY);
1356  		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
1357  		status = bd->status;
1358  	}
1359  	uec->rxBd = bd;
1360  
1361  	return 1;
1362  }
1363  
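/*
 * Allocate an eth_device and the UEC private data for one controller,
 * register it with the network stack and run uec_startup().  Returns 1
 * on success, 0 or a negative error code on failure.
 */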
1364  int uec_initialize(bd_t *bis, uec_info_t *uec_info)
1365  {
1366  	struct eth_device	*dev;
1367  	int			i;
1368  	uec_private_t		*uec;
1369  	int			err;
1370  
1371  	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
1372  	if (!dev)
1373  		return 0;
1374  	memset(dev, 0, sizeof(struct eth_device));
1375  
1376  	/* Allocate the UEC private struct */
1377  	uec = (uec_private_t *)malloc(sizeof(uec_private_t));
1378  	if (!uec) {
1379  		return -ENOMEM;
1380  	}
1381  	memset(uec, 0, sizeof(uec_private_t));
1382  
1383  	/* Adjust uec_info */
1384  #if (MAX_QE_RISC == 4)
1385  	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
1386  	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
1387  #endif
1388  
1389  	devlist[uec_info->uf_info.ucc_num] = dev;
1390  
1391  	uec->uec_info = uec_info;
1392  	uec->dev = dev;
1393  
1394  	sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);
1395  	dev->iobase = 0;
1396  	dev->priv = (void *)uec;
1397  	dev->init = uec_init;
1398  	dev->halt = uec_halt;
1399  	dev->send = uec_send;
1400  	dev->recv = uec_recv;
1401  
1402  	/* Clear the ethernet address */
1403  	for (i = 0; i < 6; i++)
1404  		dev->enetaddr[i] = 0;
1405  
1406  	eth_register(dev);
1407  
1408  	err = uec_startup(uec);
1409  	if (err) {
1410  		printf("%s: Cannot configure net device, aborting.\n", dev->name);
1411  		return err;
1412  	}
1413  
1414  #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
1415  	miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write);
1416  #endif
1417  
1418  	return 1;
1419  }
1420  
1421  int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num)
1422  {
1423  	int i;
1424  
1425  	for (i = 0; i < num; i++)
1426  		uec_initialize(bis, &uecs[i]);
1427  
1428  	return 0;
1429  }
1430  
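/*
 * uec_standard_init() registers every controller listed in uec_info[]
 * above.  A minimal board hook might look like the sketch below
 * (illustrative only; the hook name and call site are assumptions that
 * depend on the board port):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return uec_standard_init(bis);
 *	}
 */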
1431  int uec_standard_init(bd_t *bis)
1432  {
1433  	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
1434  }
1435