/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *	Dave Liu <daveliu@freescale.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <fsl_dtsec.h>
#include <fsl_tgec.h>
#include <fsl_memac.h>

#include "fm.h"

static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII)

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
static void dtsec_configure_serdes(struct fm_eth *priv)
{
#ifdef CONFIG_SYS_FMAN_V3
	u32 value;
	struct mii_dev bus;
	bus.priv = priv->mac->phyregs;
	bool sgmii_2500 = (priv->enet_if ==
			PHY_INTERFACE_MODE_SGMII_2500) ? true : false;
	int i = 0;

qsgmii_loop:
	/* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
	value = PHY_SGMII_IF_MODE_SGMII;
	if (!sgmii_2500)
		value |= PHY_SGMII_IF_MODE_AN;

	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x14, value);

	/* Dev ability according to SGMII specification */
	value = PHY_SGMII_DEV_ABILITY_SGMII;
	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x4, value);

	/* Adjust link timer for SGMII -
	   1.6 ms in units of 8 ns = 2 * 10^5 = 0x30d40 */
	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x13, 0x3);
	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x12, 0xd40);

	/* Restart AN */
	value = PHY_SGMII_CR_DEF_VAL;
	if (!sgmii_2500)
		value |= PHY_SGMII_CR_RESET_AN;
	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0, value);

	if ((priv->enet_if == PHY_INTERFACE_MODE_QSGMII) && (i < 3)) {
		i++;
		goto qsgmii_loop;
	}
#else
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			TBI_CR, TBICR_SETTINGS);
#endif
}

static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
#ifndef CONFIG_SYS_FMAN_V3
	struct dtsec *regs = (struct dtsec *)CONFIG_SYS_FSL_FM1_DTSEC1_ADDR;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);
#endif

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII_2500)
		dtsec_configure_serdes(fm_eth);
}

#ifdef CONFIG_PHYLIB
static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif
#endif

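/*
 * The helpers below emulate big-endian 16-bit accesses with aligned
 * 32-bit read-modify-write cycles.  They are used both for the MURAM
 * parameter page and for the BD rings in main memory; going through
 * 32-bit accesses (rather than in_be16()/out_be16()) is presumably
 * because MURAM does not support sub-word accesses on every SoC.
 */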
static u16 muram_readw(u16 *addr)
{
	ulong base = (ulong)addr & ~0x3UL;
	u32 val32 = in_be32((void *)base);
	int byte_pos;
	u16 ret;

	byte_pos = (ulong)addr & 0x3UL;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}

static void muram_writew(u16 *addr, u16 val)
{
	ulong base = (ulong)addr & ~0x3UL;
	u32 org32 = in_be32((void *)base);
	u32 val32;
	int byte_pos;

	byte_pos = (ulong)addr & 0x3UL;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	out_be32((void *)base, val32);
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
}

static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}

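/*
 * Independent-mode (IM) port setup: each port owns one parameter page
 * in MURAM, allocated with fm_muram_alloc() below.  The Rx queue
 * descriptor is placed at offset 0x20 and the Tx queue descriptor at
 * offset 0x40 within that page, while the BD rings and the Rx buffer
 * pool live in malloc()ed main memory; only their physical addresses
 * are written into the queue descriptors and BDs.
 */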
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	u32 bd_ring_base_lo, bd_ring_base_hi;
	u32 buf_lo, buf_hi;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	if (!pram) {
		printf("%s: No muram for Rx global parameter\n", __func__);
		return -ENOMEM;
	}

	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (void *)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	out_be32(&pram->mode, PRAM_MODE_GLOBAL);

	/* init the Rx queue descriptor pointer */
	out_be32(&pram->rxqd_ptr, pram_page_offset + 0x20);

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return -ENOMEM;

	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool)
		return -ENOMEM;

	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	debug("%s: rx_buf_pool = %p\n", __func__, rx_buf_pool);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		muram_writew(&rxbd->status, RxBD_EMPTY);
		muram_writew(&rxbd->len, 0);
		buf_hi = upper_32_bits(virt_to_phys(rx_buf_pool +
					i * MAX_RXBUF_LEN));
		buf_lo = lower_32_bits(virt_to_phys(rx_buf_pool +
					i * MAX_RXBUF_LEN));
		muram_writew(&rxbd->buf_ptr_hi, (u16)buf_hi);
		out_be32(&rxbd->buf_ptr_lo, buf_lo);
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	bd_ring_base_hi = upper_32_bits(virt_to_phys(rx_bd_ring_base));
	bd_ring_base_lo = lower_32_bits(virt_to_phys(rx_bd_ring_base));
	muram_writew(&rxqd->bd_ring_base_hi, (u16)bd_ring_base_hi);
	out_be32(&rxqd->bd_ring_base_lo, bd_ring_base_lo);
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 0;
}

static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	u32 bd_ring_base_lo, bd_ring_base_hi;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	if (!pram) {
		printf("%s: No muram for Tx global parameter\n", __func__);
		return -ENOMEM;
	}
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (void *)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	out_be32(&pram->mode, PRAM_MODE_GLOBAL);

	/* init the Tx queue descriptor pointer */
	out_be32(&pram->txqd_ptr, pram_page_offset + 0x40);

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return -ENOMEM;

	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		muram_writew(&txbd->status, TxBD_LAST);
		muram_writew(&txbd->len, 0);
		muram_writew(&txbd->buf_ptr_hi, 0);
		out_be32(&txbd->buf_ptr_lo, 0);
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	bd_ring_base_hi = upper_32_bits(virt_to_phys(tx_bd_ring_base));
	bd_ring_base_lo = lower_32_bits(virt_to_phys(tx_bd_ring_base));
	muram_writew(&txqd->bd_ring_base_hi, (u16)bd_ring_base_hi);
	out_be32(&txqd->bd_ring_base_lo, bd_ring_base_lo);
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 0;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{
	int ret;

	ret = fm_eth_rx_port_parameter_init(fm_eth);
	if (ret)
		return ret;

	ret = fm_eth_tx_port_parameter_init(fm_eth);
	if (ret)
		return ret;

	return 0;
}

static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;
	int ret;

	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	ret = fm_eth_init(fm_eth);
	if (ret)
		return ret;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* For some reason we need to set SPEED_100 */
	if (((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) ||
	     (fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII)) &&
	    mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 0;
}

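/*
 * Frame transmission is paused and resumed through the mode word of
 * the Tx parameter page: fm_eth_halt() sets PRAM_MODE_GRACEFUL_STOP
 * before the ports are disabled, and fm_eth_open() clears it again
 * once the BMI Tx port has been re-enabled.
 */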
static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	setbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP);
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	clrbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP);
	sync();
}

static int fm_eth_open(struct eth_device *dev, bd_t *bd)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;
#ifdef CONFIG_PHYLIB
	int ret;
#endif

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* setup the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MacAddress is a multicast address\n", __func__);
		return 1;
	}
	mac->set_mac_addr(mac, dev->enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frames */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	if (fm_eth->phydev) {
		ret = phy_startup(fm_eth->phydev);
		if (ret) {
			printf("%s: Could not initialize\n",
			       fm_eth->phydev->dev->name);
			return ret;
		}
	} else {
		return 0;
	}
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

#ifdef CONFIG_PHYLIB
	if (fm_eth->phydev)
		phy_shutdown(fm_eth->phydev);
#endif
}

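/*
 * Tx in independent mode: wait for a free BD, point it at the packet,
 * mark it READY | LAST, then advance offset_in in the Tx QD so the
 * FMan picks the BD up.  The frame buffer belongs to the caller, so
 * completion (READY cleared) is polled for before returning.
 */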
static int fm_eth_send(struct eth_device *dev, void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready, txbd->status = 0x%x\n",
			       dev->name, muram_readw(&txbd->status));
			return 0;
		}
	}
	/* setup TxBD */
	muram_writew(&txbd->buf_ptr_hi, (u16)upper_32_bits(virt_to_phys(buf)));
	out_be32(&txbd->buf_ptr_lo, lower_32_bits(virt_to_phys(buf)));
	muram_writew(&txbd->len, len);
	sync();
	muram_writew(&txbd->status, TxBD_READY | TxBD_LAST);
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for the buffer to be transmitted */
	for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error, txbd->status = 0x%x\n",
			       dev->name, muram_readw(&txbd->status));
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

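/*
 * Rx in independent mode: walk the BD ring until an empty BD is
 * found, hand each good frame to the network stack, then return the
 * BD to the hardware by re-marking it empty and advancing offset_out
 * in the Rx QD.
 */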
static int fm_eth_recv(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd, *rxbd_base;
	u16 status, len;
	u32 buf_lo, buf_hi;
	u8 *data;
	u16 offset_out;
	int ret = 1;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->rx_pram;
	rxbd = fm_eth->cur_rxbd;
	status = muram_readw(&rxbd->status);

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			buf_hi = muram_readw(&rxbd->buf_ptr_hi);
			buf_lo = in_be32(&rxbd->buf_ptr_lo);
			data = (u8 *)((ulong)(buf_hi << 16) << 16 | buf_lo);
			len = muram_readw(&rxbd->len);
			net_process_received_packet(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
			ret = 0;
		}

		/* clear the RxBDs */
		muram_writew(&rxbd->status, RxBD_EMPTY);
		muram_writew(&rxbd->len, 0);
		sync();

		/* advance RxBD */
		rxbd++;
		rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
		if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
			rxbd = rxbd_base;
		/* read next status */
		status = muram_readw(&rxbd->status);

		/* update RxQD */
		offset_out = muram_readw(&pram->rxqd.offset_out);
		offset_out += sizeof(struct fm_port_bd);
		if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
			offset_out = 0;
		muram_writew(&pram->rxqd.offset_out, offset_out);
		sync();
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return ret;
}

static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg)
{
	struct fsl_enet_mac *mac;
	int num;
	void *base, *phyregs = NULL;

	num = fm_eth->num;

#ifdef CONFIG_SYS_FMAN_V3
#ifndef CONFIG_FSL_FM_10GEC_REGULAR_NOTATION
	if (fm_eth->type == FM_ETH_10G_E) {
		/* 10GEC1/10GEC2 use mEMAC9/mEMAC10 on T2080/T4240.
		 * 10GEC3/10GEC4 use mEMAC1/mEMAC2 on T2080.
		 * 10GEC1 uses mEMAC1 on T1024.
		 * so it needs to change the num.
		 */
		if (fm_eth->num >= 2)
			num -= 2;
		else
			num += 8;
	}
#endif
	base = &reg->memac[num].fm_memac;
	phyregs = &reg->memac[num].fm_memac_mdio;
#else
	/* Get the mac registers base address */
	if (fm_eth->type == FM_ETH_1G_E) {
		base = &reg->mac_1g[num].fm_dtesc;
		phyregs = &reg->mac_1g[num].fm_mdio.miimcfg;
	} else {
		base = &reg->mac_10g[num].fm_10gec;
		phyregs = &reg->mac_10g[num].fm_10gec_mdio;
	}
#endif

	/* alloc mac controller */
	mac = malloc(sizeof(struct fsl_enet_mac));
	if (!mac)
		return -ENOMEM;
	memset(mac, 0, sizeof(struct fsl_enet_mac));

	/* save the mac to fm_eth struct */
	fm_eth->mac = mac;

#ifdef CONFIG_SYS_FMAN_V3
	init_memac(mac, base, phyregs, MAX_RXBUF_LEN);
#else
	if (fm_eth->type == FM_ETH_1G_E)
		init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN);
	else
		init_tgec(mac, base, phyregs, MAX_RXBUF_LEN);
#endif

	return 0;
}

static int init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev = NULL;
	u32 supported;
#endif

	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(dev);

#ifdef CONFIG_PHYLIB
	if (fm_eth->bus) {
		phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev,
				     fm_eth->enet_if);
		if (!phydev) {
			printf("Failed to connect\n");
			return -1;
		}
	} else {
		return 0;
	}

	if (fm_eth->type == FM_ETH_1G_E) {
		supported = (SUPPORTED_10baseT_Half |
			     SUPPORTED_10baseT_Full |
			     SUPPORTED_100baseT_Half |
			     SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full);
	} else {
		supported = SUPPORTED_10000baseT_Full;

		if (tgec_is_fibre(dev))
			phydev->port = PORT_FIBRE;
	}

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	fm_eth->phydev = phydev;

	phy_config(phydev);
#endif

	return 0;
}

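/*
 * Create and register one U-Boot eth_device per FMan port: locate the
 * BMI Rx/Tx port registers and the MAC, start the port in independent
 * mode and hook up the PHY.  This is normally called once per enabled
 * port by the SoC FMan init code (e.g. fm_standard_init()).
 */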
int fm_eth_initialize(struct ccsr_fman *reg, struct fm_eth_info *info)
{
	struct eth_device *dev;
	struct fm_eth *fm_eth;
	int i, num = info->num;
	int ret;

	/* alloc eth device */
	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;
	memset(dev, 0, sizeof(struct eth_device));

	/* alloc the FMan ethernet private struct */
	fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth));
	if (!fm_eth)
		return -ENOMEM;
	memset(fm_eth, 0, sizeof(struct fm_eth));

	/* save off some things we need from the info struct */
	fm_eth->fm_index = info->index - 1; /* keep as 0 based for muram */
	fm_eth->num = num;
	fm_eth->type = info->type;

	fm_eth->rx_port = (void *)&reg->port[info->rx_port_id - 1].fm_bmi;
	fm_eth->tx_port = (void *)&reg->port[info->tx_port_id - 1].fm_bmi;

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	/* init global mac structure */
	ret = fm_eth_init_mac(fm_eth, reg);
	if (ret)
		return ret;

	/* name the devices as in the manual: FM1@DTSEC1, FM1@TGEC1, etc. */
	if (fm_eth->type == FM_ETH_1G_E)
		sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1);
	else
		sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1);

	devlist[num_controllers++] = dev;
	dev->iobase = 0;
	dev->priv = (void *)fm_eth;
	dev->init = fm_eth_open;
	dev->halt = fm_eth_halt;
	dev->send = fm_eth_send;
	dev->recv = fm_eth_recv;
	fm_eth->dev = dev;
	fm_eth->bus = info->bus;
	fm_eth->phyaddr = info->phy_addr;
	fm_eth->enet_if = info->enet_if;

	/* start up the FMan port in independent mode */
	ret = fm_eth_startup(fm_eth);
	if (ret)
		return ret;

	init_phy(dev);

	/* clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;
	eth_register(dev);

	return 0;
}