/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *	Dave Liu <daveliu@freescale.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/fsl_dtsec.h>
#include <asm/fsl_tgec.h>
#include <asm/fsl_memac.h>

#include "fm.h"

static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII)

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
static void dtsec_configure_serdes(struct fm_eth *priv)
{
#ifdef CONFIG_SYS_FMAN_V3
	u32 value;
	struct mii_dev bus;
	bus.priv = priv->mac->phyregs;
	bool sgmii_2500 = (priv->enet_if ==
			PHY_INTERFACE_MODE_SGMII_2500) ? true : false;

	/* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
	value = PHY_SGMII_IF_MODE_SGMII;
	if (!sgmii_2500)
		value |= PHY_SGMII_IF_MODE_AN;

	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x14, value);

	/* Dev ability according to SGMII specification */
	value = PHY_SGMII_DEV_ABILITY_SGMII;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x4, value);

	/*
	 * Adjust link timer for SGMII -
	 * 1.6 ms in units of 8 ns = 2 * 10^5 = 0x30d40
	 */
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x13, 0x3);
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x12, 0xd40);

	/* Restart AN */
	value = PHY_SGMII_CR_DEF_VAL;
	if (!sgmii_2500)
		value |= PHY_SGMII_CR_RESET_AN;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0, value);
#else
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			TBI_CR, TBICR_SETTINGS);
#endif
}

static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
#ifndef CONFIG_SYS_FMAN_V3
	struct dtsec *regs = (struct dtsec *)CONFIG_SYS_FSL_FM1_DTSEC1_ADDR;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);
#endif

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII_2500)
		dtsec_configure_serdes(fm_eth);
}

static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif

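/*
 * Helpers for the 16-bit fields in the MURAM parameter page: MURAM is
 * touched here only as aligned 32-bit words, so a half-word read or
 * write is done as a read-modify-write of the enclosing word.
 */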
static u16 muram_readw(u16 *addr)
{
	u32 base = (u32)addr & ~0x3;
	u32 val32 = *(u32 *)base;
	int byte_pos;
	u16 ret;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}

static void muram_writew(u16 *addr, u16 val)
{
	u32 base = (u32)addr & ~0x3;
	u32 org32 = *(u32 *)base;
	u32 val32;
	int byte_pos;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	*(u32 *)base = val32;
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
}

static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}

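/*
 * Set up the Rx side of an independent-mode port: allocate the global
 * parameter page in MURAM plus the Rx BD ring and receive buffer pool
 * in main memory, link the buffers into the BDs, and point the BMI Rx
 * port's frame queue ID register at the parameter page.  Returns 1 on
 * success, 0 on allocation failure.
 */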
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
			FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Rx queue descriptor pointer */
	pram->rxqd_ptr = pram_page_offset + 0x20;

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return 0;
	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool)
		return 0;
	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		rxbd->buf_ptr_hi = 0;
		rxbd->buf_ptr_lo = (u32)rx_buf_pool + i * MAX_RXBUF_LEN;
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	muram_writew(&rxqd->bd_ring_base_hi, 0);
	rxqd->bd_ring_base_lo = (u32)rx_bd_ring_base;
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 1;
}

static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
			FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Tx queue descriptor pointer */
	pram->txqd_ptr = pram_page_offset + 0x40;

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return 0;
	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		txbd->status = TxBD_LAST;
		txbd->len = 0;
		txbd->buf_ptr_hi = 0;
		txbd->buf_ptr_lo = 0;
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	muram_writew(&txqd->bd_ring_base_hi, 0);
	txqd->bd_ring_base_lo = (u32)tx_bd_ring_base;
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 1;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{
	if (!fm_eth_rx_port_parameter_init(fm_eth))
		return 0;

	if (!fm_eth_tx_port_parameter_init(fm_eth))
		return 0;

	return 1;
}

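/*
 * One-time bring-up of a port: initialise the BD rings and parameter
 * RAM, program the MAC, and put both BMI ports into independent mode
 * (still disabled; they are enabled later in fm_eth_open()).
 */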
static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;
	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	if (!fm_eth_init(fm_eth))
		return 0;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* For some reason we need to set SPEED_100 */
	if (((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) ||
	     (fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII)) &&
	    mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 1;
}

static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	pram->mode |= PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	pram->mode &= ~PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static int fm_eth_open(struct eth_device *dev, bd_t *bd)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;
#ifdef CONFIG_PHYLIB
	int ret;
#endif

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* setup the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}
	mac->set_mac_addr(mac, dev->enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frames */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	ret = phy_startup(fm_eth->phydev);
	if (ret) {
		printf("%s: Could not initialize\n", fm_eth->phydev->dev->name);
		return ret;
	}
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

	phy_shutdown(fm_eth->phydev);
}

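/*
 * Transmit one frame: wait for a free BD, fill in the buffer pointer
 * and length, mark the BD ready, then bump offset_in in the Tx queue
 * descriptor so the FMan RISC sends the packet, and finally poll until
 * the BD is released again.
 */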
static int fm_eth_send(struct eth_device *dev, void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready\n", dev->name);
			return 0;
		}
	}
	/* setup TxBD */
	txbd->buf_ptr_hi = 0;
	txbd->buf_ptr_lo = (u32)buf;
	txbd->len = len;
	sync();
	txbd->status = TxBD_READY | TxBD_LAST;
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for buffer to be transmitted */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error\n", dev->name);
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

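/*
 * Poll the Rx BD ring: hand every filled buffer to the network stack,
 * mark the BD empty again and advance offset_out in the Rx queue
 * descriptor so the FMan can reuse it.  Stops at the first error or
 * when no more filled BDs are pending.
 */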
static int fm_eth_recv(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd, *rxbd_base;
	u16 status, len;
	u8 *data;
	u16 offset_out;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->rx_pram;
	rxbd = fm_eth->cur_rxbd;
	status = rxbd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = (u8 *)rxbd->buf_ptr_lo;
			len = rxbd->len;
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
			return 0;
		}

		/* clear the RxBDs */
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		sync();

		/* advance RxBD */
		rxbd++;
		rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
		if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
			rxbd = rxbd_base;
		/* read next status */
		status = rxbd->status;

		/* update RxQD */
		offset_out = muram_readw(&pram->rxqd.offset_out);
		offset_out += sizeof(struct fm_port_bd);
		if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
			offset_out = 0;
		muram_writew(&pram->rxqd.offset_out, offset_out);
		sync();
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return 1;
}

static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg)
{
	struct fsl_enet_mac *mac;
	int num;
	void *base, *phyregs = NULL;

	num = fm_eth->num;

#ifdef CONFIG_SYS_FMAN_V3
	if (fm_eth->type == FM_ETH_10G_E) {
		/* 10GEC1/10GEC2 use mEMAC9/mEMAC10,
		 * 10GEC3/10GEC4 use mEMAC1/mEMAC2,
		 * so the MAC index needs to be remapped.
		 */
		if (fm_eth->num >= 2)
			num -= 2;
		else
			num += 8;
	}
	base = &reg->memac[num].fm_memac;
	phyregs = &reg->memac[num].fm_memac_mdio;
#else
	/* Get the mac registers base address */
	if (fm_eth->type == FM_ETH_1G_E) {
		base = &reg->mac_1g[num].fm_dtesc;
		phyregs = &reg->mac_1g[num].fm_mdio.miimcfg;
	} else {
		base = &reg->mac_10g[num].fm_10gec;
		phyregs = &reg->mac_10g[num].fm_10gec_mdio;
	}
#endif

	/* alloc mac controller */
	mac = malloc(sizeof(struct fsl_enet_mac));
	if (!mac)
		return 0;
	memset(mac, 0, sizeof(struct fsl_enet_mac));

	/* save the mac to fm_eth struct */
	fm_eth->mac = mac;

#ifdef CONFIG_SYS_FMAN_V3
	init_memac(mac, base, phyregs, MAX_RXBUF_LEN);
#else
	if (fm_eth->type == FM_ETH_1G_E)
		init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN);
	else
		init_tgec(mac, base, phyregs, MAX_RXBUF_LEN);
#endif

	return 1;
}

static int init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct phy_device *phydev = NULL;
	u32 supported;

#ifdef CONFIG_PHYLIB
	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(dev);

	if (fm_eth->bus) {
		phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev,
					fm_eth->enet_if);
	}

	if (!phydev) {
		printf("Failed to connect\n");
		return -1;
	}

	if (fm_eth->type == FM_ETH_1G_E) {
		supported = (SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_100baseT_Half |
				SUPPORTED_100baseT_Full |
				SUPPORTED_1000baseT_Full);
	} else {
		supported = SUPPORTED_10000baseT_Full;

		if (tgec_is_fibre(dev))
			phydev->port = PORT_FIBRE;
	}

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	fm_eth->phydev = phydev;

	phy_config(phydev);
#endif

	return 0;
}

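/*
 * Board-level entry point: allocate and fill the eth_device and fm_eth
 * structures for one FMan port, hook up the MAC, BMI ports and MDIO bus
 * described by 'info', start the port in independent mode and register
 * it with the U-Boot network core.
 */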
int fm_eth_initialize(struct ccsr_fman *reg, struct fm_eth_info *info)
{
	struct eth_device *dev;
	struct fm_eth *fm_eth;
	int i, num = info->num;

	/* alloc eth device */
	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* alloc the FMan ethernet private struct */
	fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth));
	if (!fm_eth)
		return 0;
	memset(fm_eth, 0, sizeof(struct fm_eth));

	/* save off some things we need from the info struct */
	fm_eth->fm_index = info->index - 1;	/* keep as 0 based for muram */
	fm_eth->num = num;
	fm_eth->type = info->type;

	fm_eth->rx_port = (void *)&reg->port[info->rx_port_id - 1].fm_bmi;
	fm_eth->tx_port = (void *)&reg->port[info->tx_port_id - 1].fm_bmi;

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	/* init global mac structure */
	if (!fm_eth_init_mac(fm_eth, reg))
		return 0;

	/* keep the naming consistent with the manual: FMAN1, FMAN2, DTSEC1, DTSEC2, etc. */
	if (fm_eth->type == FM_ETH_1G_E)
		sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1);
	else
		sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1);

	devlist[num_controllers++] = dev;
	dev->iobase = 0;
	dev->priv = (void *)fm_eth;
	dev->init = fm_eth_open;
	dev->halt = fm_eth_halt;
	dev->send = fm_eth_send;
	dev->recv = fm_eth_recv;
	fm_eth->dev = dev;
	fm_eth->bus = info->bus;
	fm_eth->phyaddr = info->phy_addr;
	fm_eth->enet_if = info->enet_if;

	/* start up the FMan port in independent mode */
	if (!fm_eth_startup(fm_eth))
		return 0;

	if (init_phy(dev))
		return 0;

	/* clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;
	eth_register(dev);

	return 1;
}