xref: /openbmc/u-boot/drivers/net/fm/eth.c (revision 26ddff2d)
/*
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 *	Dave Liu <daveliu@freescale.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/fsl_dtsec.h>
#include <asm/fsl_tgec.h>

#include "fm.h"

static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

#if defined(CONFIG_MII) || (defined(CONFIG_CMD_MII) && !defined(BITBANGMII))

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
void dtsec_configure_serdes(struct fm_eth *priv)
{
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			TBI_CR, TBICR_SETTINGS);
}

static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct dtsec *regs = (struct dtsec *)fm_eth->mac->base;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII)
		dtsec_configure_serdes(fm_eth);
}

static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif

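/*
 * 16-bit access helpers for the FMan parameter RAM in MURAM.  Rather than
 * dereferencing a u16 pointer directly, they read or write the enclosing
 * 32-bit word and merge the half-word in, presumably because MURAM is only
 * reliably accessed with 32-bit operations.
 */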
static u16 muram_readw(u16 *addr)
{
	u32 base = (u32)addr & ~0x3;
	u32 val32 = *(u32 *)base;
	int byte_pos;
	u16 ret;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}

static void muram_writew(u16 *addr, u16 val)
{
	u32 base = (u32)addr & ~0x3;
	u32 org32 = *(u32 *)base;
	u32 val32;
	int byte_pos;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	*(u32 *)base = val32;
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
}

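/*
 * The BMI ports are used in "independent mode" (IM), where U-Boot drives
 * the FMan RISC directly through buffer descriptor rings in main memory.
 * The two init routines below leave the ports configured but disabled;
 * fm_eth_open() enables them later.
 */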
static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}

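/*
 * Set up the Rx side of a port in independent mode: allocate a parameter
 * page in MURAM plus an Rx BD ring and buffer pool in main memory, point
 * each BD at its buffer, and hand the parameter page offset to the BMI
 * through fmbm_rfqid.  Returns 1 on success, 0 on allocation failure.
 */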
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Rx queue descriptor pointer */
	pram->rxqd_ptr = pram_page_offset + 0x20;

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return 0;
	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool)
		return 0;
	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		rxbd->buf_ptr_hi = 0;
		rxbd->buf_ptr_lo = (u32)rx_buf_pool + i * MAX_RXBUF_LEN;
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	muram_writew(&rxqd->bd_ring_base_hi, 0);
	rxqd->bd_ring_base_lo = (u32)rx_bd_ring_base;
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 1;
}

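/*
 * Tx counterpart of the Rx setup above: allocate the Tx parameter page in
 * MURAM and a Tx BD ring in main memory.  No buffer pool is needed here,
 * since each BD is pointed at the caller's packet buffer in fm_eth_send().
 */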
static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Tx queue descriptor pointer */
	pram->txqd_ptr = pram_page_offset + 0x40;

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return 0;
	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		txbd->status = TxBD_LAST;
		txbd->len = 0;
		txbd->buf_ptr_hi = 0;
		txbd->buf_ptr_lo = 0;
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	muram_writew(&txqd->bd_ring_base_hi, 0);
	txqd->bd_ring_base_lo = (u32)tx_bd_ring_base;
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 1;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{

	if (!fm_eth_rx_port_parameter_init(fm_eth))
		return 0;

	if (!fm_eth_tx_port_parameter_init(fm_eth))
		return 0;

	return 1;
}

static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;
	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	if (!fm_eth_init(fm_eth))
		return 0;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* SGMII needs an initial speed setting (SPEED_100 here); it is reprogrammed with the negotiated speed in fm_eth_open() */
	if ((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) && mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 1;
}

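/*
 * Graceful stop and restart of transmission are requested by toggling the
 * PRAM_MODE_GRACEFUL_STOP bit in the Tx parameter page; sync() makes the
 * update visible to the FMan before the caller proceeds.
 */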
static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	pram->mode |= PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	pram->mode &= ~PRAM_MODE_GRACEFUL_STOP;
	sync();
}

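/*
 * Legacy eth_device callbacks.  fm_eth_open() programs the station MAC
 * address, enables the BMI Rx/Tx ports and the MAC, releases the graceful
 * stop and starts the PHY; it returns 0 only when the link is up.
 */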
static int fm_eth_open(struct eth_device *dev, bd_t *bd)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* setup the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}
	mac->set_mac_addr(mac, dev->enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frames */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	phy_startup(fm_eth->phydev);
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

	phy_shutdown(fm_eth->phydev);
}

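/*
 * Transmit one frame in independent mode: claim the next free TxBD, point
 * it at the caller's buffer, mark it READY|LAST and advance the TxQD
 * offset_in so the FMan RISC picks it up.  The routine then busy-waits
 * (with a timeout) for READY to clear, so transmission is synchronous.
 */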
static int fm_eth_send(struct eth_device *dev, volatile void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready\n", dev->name);
			return 0;
		}
	}
	/* setup TxBD */
	txbd->buf_ptr_hi = 0;
	txbd->buf_ptr_lo = (u32)buf;
	txbd->len = len;
	sync();
	txbd->status = TxBD_READY | TxBD_LAST;
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for buffer to be transmitted */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error\n", dev->name);
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

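/*
 * Poll the Rx BD ring: hand every filled, error-free buffer to the network
 * stack via NetReceive(), return the BD to the hardware by marking it
 * EMPTY again, and advance the RxQD offset_out to match.  Processing stops
 * at the first empty BD or on an Rx error.
 */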
static int fm_eth_recv(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd, *rxbd_base;
	u16 status, len;
	u8 *data;
	u16 offset_out;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->rx_pram;
	rxbd = fm_eth->cur_rxbd;
	status = rxbd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = (u8 *)rxbd->buf_ptr_lo;
			len = rxbd->len;
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
			return 0;
		}

		/* clear the RxBDs */
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		sync();

		/* advance RxBD */
		rxbd++;
		rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
		if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
			rxbd = rxbd_base;
		/* read next status */
		status = rxbd->status;

		/* update RxQD */
		offset_out = muram_readw(&pram->rxqd.offset_out);
		offset_out += sizeof(struct fm_port_bd);
		if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
			offset_out = 0;
		muram_writew(&pram->rxqd.offset_out, offset_out);
		sync();
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return 1;
}

static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg)
{
	struct fsl_enet_mac *mac;
	int num;
	void *base, *phyregs = NULL;

	num = fm_eth->num;

	/* Get the mac registers base address */
	if (fm_eth->type == FM_ETH_1G_E) {
		base = &reg->mac_1g[num].fm_dtesc;
		phyregs = &reg->mac_1g[num].fm_mdio.miimcfg;
	} else {
		base = &reg->mac_10g[num].fm_10gec;
		phyregs = &reg->mac_10g[num].fm_10gec_mdio;
	}

	/* alloc mac controller */
	mac = malloc(sizeof(struct fsl_enet_mac));
	if (!mac)
		return 0;
	memset(mac, 0, sizeof(struct fsl_enet_mac));

	/* save the mac to fm_eth struct */
	fm_eth->mac = mac;

	if (fm_eth->type == FM_ETH_1G_E)
		init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN);
	else
		init_tgec(mac, base, phyregs, MAX_RXBUF_LEN);

	return 1;
}

static int init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct phy_device *phydev = NULL;
	u32 supported;

#ifdef CONFIG_PHYLIB
	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(dev);

	if (fm_eth->bus) {
		phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev,
					fm_eth->enet_if);
	}

	if (!phydev) {
		printf("Failed to connect\n");
		return -1;
	}

	if (fm_eth->type == FM_ETH_1G_E) {
		supported = (SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_100baseT_Half |
				SUPPORTED_100baseT_Full |
				SUPPORTED_1000baseT_Full);
	} else {
		supported = SUPPORTED_10000baseT_Full;

		if (tgec_is_fibre(dev))
			phydev->port = PORT_FIBRE;
	}

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	fm_eth->phydev = phydev;

	phy_config(phydev);
#endif

	return 0;
}

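/*
 * Register one FMan port with the legacy eth layer.  Called once per
 * active port by the platform's FMan setup code, with the CCSR FMan block
 * and an fm_eth_info describing the port (index, type, port IDs, MDIO bus,
 * PHY address, interface mode).  The MAC address is cleared here and is
 * expected to be filled in from the environment by the eth layer;
 * fm_eth_open() then programs it into the MAC.
 */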
int fm_eth_initialize(struct ccsr_fman *reg, struct fm_eth_info *info)
{
	struct eth_device *dev;
	struct fm_eth *fm_eth;
	int i, num = info->num;

	/* alloc eth device */
	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* alloc the FMan ethernet private struct */
	fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth));
	if (!fm_eth)
		return 0;
	memset(fm_eth, 0, sizeof(struct fm_eth));

	/* save off some things we need from the info struct */
	fm_eth->fm_index = info->index - 1; /* keep as 0 based for muram */
	fm_eth->num = num;
	fm_eth->type = info->type;

	fm_eth->rx_port = (void *)&reg->port[info->rx_port_id - 1].fm_bmi;
	fm_eth->tx_port = (void *)&reg->port[info->tx_port_id - 1].fm_bmi;

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	/* init global mac structure */
	if (!fm_eth_init_mac(fm_eth, reg))
		return 0;

	/* match the reference manual naming: FMAN1, FMAN2, DTSEC1, DTSEC2, etc. */
	if (fm_eth->type == FM_ETH_1G_E)
		sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1);
	else
		sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1);

	devlist[num_controllers++] = dev;
	dev->iobase = 0;
	dev->priv = (void *)fm_eth;
	dev->init = fm_eth_open;
	dev->halt = fm_eth_halt;
	dev->send = fm_eth_send;
	dev->recv = fm_eth_recv;
	fm_eth->dev = dev;
	fm_eth->bus = info->bus;
	fm_eth->phyaddr = info->phy_addr;
	fm_eth->enet_if = info->enet_if;

	/* start up the FMan port in independent mode (IM) */
	if (!fm_eth_startup(fm_eth))
		return 0;

	if (init_phy(dev))
		return 0;

	/* clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;
	eth_register(dev);

	return 1;
}