/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *	Dave Liu <daveliu@freescale.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/fsl_dtsec.h>
#include <asm/fsl_tgec.h>
#include <asm/fsl_memac.h>

#include "fm.h"

static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII)

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
static void dtsec_configure_serdes(struct fm_eth *priv)
{
#ifdef CONFIG_SYS_FMAN_V3
	u32 value;
	struct mii_dev bus;
	bool sgmii_2500 = (priv->enet_if ==
			   PHY_INTERFACE_MODE_SGMII_2500) ? true : false;

	bus.priv = priv->mac->phyregs;

	/* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
	value = PHY_SGMII_IF_MODE_SGMII;
	if (!sgmii_2500)
		value |= PHY_SGMII_IF_MODE_AN;

	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x14, value);

	/* Dev ability according to SGMII specification */
	value = PHY_SGMII_DEV_ABILITY_SGMII;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x4, value);

	/*
	 * Adjust link timer for SGMII -
	 * 1.6 ms in units of 8 ns = 2 * 10^5 = 0x30d40
	 */
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x13, 0x3);
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x12, 0xd40);

	/* Restart AN */
	value = PHY_SGMII_CR_DEF_VAL;
	if (!sgmii_2500)
		value |= PHY_SGMII_CR_RESET_AN;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0, value);
#else
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			      TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			      TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			      TBI_CR, TBICR_SETTINGS);
#endif
}

static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
#ifndef CONFIG_SYS_FMAN_V3
	struct dtsec *regs = (struct dtsec *)CONFIG_SYS_FSL_FM1_DTSEC1_ADDR;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);
#endif

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII_2500)
		dtsec_configure_serdes(fm_eth);
}

static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif

static u16 muram_readw(u16 *addr)
{
	u32 base = (u32)addr & ~0x3;
	u32 val32 = *(u32 *)base;
	int byte_pos;
	u16 ret;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}
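/*
 * muram_readw() above and muram_writew() below touch MURAM only as
 * aligned 32-bit words (presumably because the parameter area is not
 * reliably accessible with smaller transactions): a halfword access
 * loads the containing word and picks or merges the big-endian
 * halfword selected by bit 1 of the address.  For example, writing
 * 0x1234 to an address with (addr & 0x3) == 2 replaces only the low
 * 16 bits of the word and leaves the upper 16 bits untouched.
 */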
static void muram_writew(u16 *addr, u16 val)
{
	u32 base = (u32)addr & ~0x3;
	u32 org32 = *(u32 *)base;
	u32 val32;
	int byte_pos;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	*(u32 *)base = val32;
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
}

static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}
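/*
 * The two routines below each fill in one independent-mode parameter
 * page in MURAM.  As implied by the offsets used here (the struct
 * layout itself is defined in the driver headers):
 *
 *   pram->mode   - PRAM_MODE_GLOBAL, plus PRAM_MODE_GRACEFUL_STOP
 *                  while transmission is being quiesced
 *   pram->mrblr  - max Rx buffer length, stored as the power-of-two
 *                  exponent (MAX_RXBUF_LOG2)
 *   pram->rxqd   - Rx queue descriptor at page offset 0x20
 *   pram->txqd   - Tx queue descriptor at page offset 0x40
 *
 * Each queue descriptor carries the BD ring base, the ring size in
 * bytes, and the offset_in/offset_out cursors used by fm_eth_send()
 * and fm_eth_recv().
 */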
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Rx queue descriptor pointer */
	pram->rxqd_ptr = pram_page_offset + 0x20;

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return 0;
	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool)
		return 0;
	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		rxbd->buf_ptr_hi = 0;
		rxbd->buf_ptr_lo = (u32)rx_buf_pool + i * MAX_RXBUF_LEN;
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	muram_writew(&rxqd->bd_ring_base_hi, 0);
	rxqd->bd_ring_base_lo = (u32)rx_bd_ring_base;
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 1;
}

static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Tx queue descriptor pointer */
	pram->txqd_ptr = pram_page_offset + 0x40;

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return 0;
	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		txbd->status = TxBD_LAST;
		txbd->len = 0;
		txbd->buf_ptr_hi = 0;
		txbd->buf_ptr_lo = 0;
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	muram_writew(&txqd->bd_ring_base_hi, 0);
	txqd->bd_ring_base_lo = (u32)tx_bd_ring_base;
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 1;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{
	if (!fm_eth_rx_port_parameter_init(fm_eth))
		return 0;

	if (!fm_eth_tx_port_parameter_init(fm_eth))
		return 0;

	return 1;
}
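/*
 * fm_eth_startup() below brings a port up step by step: parameter
 * pages and BD rings first (fm_eth_init()), then the MAC through the
 * fsl_enet_mac ops, then the BMI Rx/Tx ports in independent mode.
 * The BMI ports are left disabled at this point; fm_eth_open() later
 * sets FMBM_RCFG_EN/FMBM_TCFG_EN and enables the MAC when the
 * interface is actually opened.
 */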
static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;

	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	if (!fm_eth_init(fm_eth))
		return 0;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* For some reason we need to set SPEED_100 */
	if (((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) ||
	     (fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII)) &&
	    mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 1;
}

static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	pram->mode |= PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	pram->mode &= ~PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static int fm_eth_open(struct eth_device *dev, bd_t *bd)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;
#ifdef CONFIG_PHYLIB
	int ret;
#endif

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* setup the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}
	mac->set_mac_addr(mac, dev->enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frames */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	if (fm_eth->phydev) {
		ret = phy_startup(fm_eth->phydev);
		if (ret) {
			printf("%s: Could not initialize\n",
			       fm_eth->phydev->dev->name);
			return ret;
		}
	} else {
		return 0;
	}
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

	if (fm_eth->phydev)
		phy_shutdown(fm_eth->phydev);
}
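/*
 * Transmit/receive handshake with the FMan RISC in independent mode,
 * as implemented by the two routines below:
 *
 * - Tx: fill the next BD, set TxBD_READY | TxBD_LAST, then advance
 *   txqd.offset_in by sizeof(struct fm_port_bd), wrapping at
 *   bd_ring_size.  The RISC consumes the BD and clears TxBD_READY,
 *   which fm_eth_send() polls for before reusing the descriptor.
 * - Rx: the RISC fills BDs and clears RxBD_EMPTY; fm_eth_recv()
 *   processes every non-empty BD, marks it empty again and advances
 *   rxqd.offset_out in the same wrapping fashion.
 */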
static int fm_eth_send(struct eth_device *dev, void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready\n", dev->name);
			return 0;
		}
	}
	/* setup TxBD */
	txbd->buf_ptr_hi = 0;
	txbd->buf_ptr_lo = (u32)buf;
	txbd->len = len;
	sync();
	txbd->status = TxBD_READY | TxBD_LAST;
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for the buffer to be transmitted */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error\n", dev->name);
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

static int fm_eth_recv(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd, *rxbd_base;
	u16 status, len;
	u8 *data;
	u16 offset_out;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->rx_pram;
	rxbd = fm_eth->cur_rxbd;
	status = rxbd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = (u8 *)rxbd->buf_ptr_lo;
			len = rxbd->len;
			net_process_received_packet(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
			return 0;
		}

		/* clear the RxBDs */
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		sync();

		/* advance RxBD */
		rxbd++;
		rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
		if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
			rxbd = rxbd_base;
		/* read next status */
		status = rxbd->status;

		/* update RxQD */
		offset_out = muram_readw(&pram->rxqd.offset_out);
		offset_out += sizeof(struct fm_port_bd);
		if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
			offset_out = 0;
		muram_writew(&pram->rxqd.offset_out, offset_out);
		sync();
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return 1;
}
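/*
 * fm_eth_init_mac() below resolves the MAC register block for this
 * port.  On FMan v3 every MAC is an mEMAC; on earlier FMan revisions
 * dTSEC (1G) and TGEC (10G) have separate register arrays.  The 10G
 * index remap only models the SoCs named in the comment inside the
 * function (T2080/T4240/T1024); CONFIG_FSL_FM_10GEC_REGULAR_NOTATION
 * skips the remap, presumably for parts whose 10GEC numbering already
 * matches the mEMAC numbering.
 */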
static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg)
{
	struct fsl_enet_mac *mac;
	int num;
	void *base, *phyregs = NULL;

	num = fm_eth->num;

#ifdef CONFIG_SYS_FMAN_V3
#ifndef CONFIG_FSL_FM_10GEC_REGULAR_NOTATION
	if (fm_eth->type == FM_ETH_10G_E) {
		/* 10GEC1/10GEC2 use mEMAC9/mEMAC10 on T2080/T4240.
		 * 10GEC3/10GEC4 use mEMAC1/mEMAC2 on T2080.
		 * 10GEC1 uses mEMAC1 on T1024.
		 * So the MAC index needs to be adjusted.
		 */
		if (fm_eth->num >= 2)
			num -= 2;
		else
			num += 8;
	}
#endif
	base = &reg->memac[num].fm_memac;
	phyregs = &reg->memac[num].fm_memac_mdio;
#else
	/* Get the mac registers base address */
	if (fm_eth->type == FM_ETH_1G_E) {
		base = &reg->mac_1g[num].fm_dtesc;
		phyregs = &reg->mac_1g[num].fm_mdio.miimcfg;
	} else {
		base = &reg->mac_10g[num].fm_10gec;
		phyregs = &reg->mac_10g[num].fm_10gec_mdio;
	}
#endif

	/* alloc mac controller */
	mac = malloc(sizeof(struct fsl_enet_mac));
	if (!mac)
		return 0;
	memset(mac, 0, sizeof(struct fsl_enet_mac));

	/* save the mac to fm_eth struct */
	fm_eth->mac = mac;

#ifdef CONFIG_SYS_FMAN_V3
	init_memac(mac, base, phyregs, MAX_RXBUF_LEN);
#else
	if (fm_eth->type == FM_ETH_1G_E)
		init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN);
	else
		init_tgec(mac, base, phyregs, MAX_RXBUF_LEN);
#endif

	return 1;
}

static int init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct phy_device *phydev = NULL;
	u32 supported;

#ifdef CONFIG_PHYLIB
	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(dev);

	if (fm_eth->bus) {
		phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev,
				     fm_eth->enet_if);
		if (!phydev) {
			printf("Failed to connect\n");
			return -1;
		}
	} else {
		return 0;
	}

	if (fm_eth->type == FM_ETH_1G_E) {
		supported = (SUPPORTED_10baseT_Half |
			     SUPPORTED_10baseT_Full |
			     SUPPORTED_100baseT_Half |
			     SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full);
	} else {
		supported = SUPPORTED_10000baseT_Full;

		if (tgec_is_fibre(dev))
			phydev->port = PORT_FIBRE;
	}

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	fm_eth->phydev = phydev;

	phy_config(phydev);
#endif

	return 0;
}
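/*
 * fm_eth_initialize() is the per-port entry point of this driver: it
 * allocates the eth_device and fm_eth state, binds the BMI Rx/Tx
 * ports selected by the 1-based port IDs in fm_eth_info, sets up the
 * MAC, starts the port in independent mode, probes the PHY and
 * registers the device as FMx@DTSECy or FMx@TGECy.  The MAC address
 * is left cleared here; the hardware address is programmed later in
 * fm_eth_open() from dev->enetaddr.
 */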
int fm_eth_initialize(struct ccsr_fman *reg, struct fm_eth_info *info)
{
	struct eth_device *dev;
	struct fm_eth *fm_eth;
	int i, num = info->num;

	/* alloc eth device */
	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* alloc the FMan ethernet private struct */
	fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth));
	if (!fm_eth)
		return 0;
	memset(fm_eth, 0, sizeof(struct fm_eth));

	/* save off some things we need from the info struct */
	fm_eth->fm_index = info->index - 1; /* keep as 0 based for muram */
	fm_eth->num = num;
	fm_eth->type = info->type;

	fm_eth->rx_port = (void *)&reg->port[info->rx_port_id - 1].fm_bmi;
	fm_eth->tx_port = (void *)&reg->port[info->tx_port_id - 1].fm_bmi;

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	/* init global mac structure */
	if (!fm_eth_init_mac(fm_eth, reg))
		return 0;

	/* keep the naming consistent with the manual: FM1@DTSEC1, FM1@TGEC1, etc. */
	if (fm_eth->type == FM_ETH_1G_E)
		sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1);
	else
		sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1);

	devlist[num_controllers++] = dev;
	dev->iobase = 0;
	dev->priv = (void *)fm_eth;
	dev->init = fm_eth_open;
	dev->halt = fm_eth_halt;
	dev->send = fm_eth_send;
	dev->recv = fm_eth_recv;
	fm_eth->dev = dev;
	fm_eth->bus = info->bus;
	fm_eth->phyaddr = info->phy_addr;
	fm_eth->enet_if = info->enet_if;

	/* start up the FMan IM (independent mode) port */
	if (!fm_eth_startup(fm_eth))
		return 0;

	init_phy(dev);

	/* clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;
	eth_register(dev);

	return 1;
}