/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include "common.h"
#include "net.h"
#include "malloc.h"
#include "asm/errno.h"
#include "asm/io.h"
#include "asm/immap_qe.h"
#include "qe.h"
#include "uccf.h"
#include "uec.h"
#include "uec_phy.h"
#include "miiphy.h"

/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA	0x1F
#endif

static uec_info_t uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};

#define MAXCONTROLLERS	(8)

static struct eth_device *devlist[MAXCONTROLLERS];

u16 phy_read(struct uec_mii_info *mii_info, u16 regnum);
void phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val);

static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

static int uec_graceful_stop_tx(uec_private_t *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the graceful stop event */
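	/*
	 * UCCE event bits are cleared by writing them back as ones, so this
	 * write acknowledges any previously latched graceful-stop event and
	 * the polling loop below only sees completion of the command issued
	 * next.
	 */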
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for the command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(uec_private_t *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: Rx global parameter RAM not initialized\n",
			__FUNCTION__);
		return -EINVAL;
	}

	/* Clear the acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing the command and checking the ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
				(u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_restart_tx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

static int uec_open(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* Check if the UCC number is in range */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Enable the MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
		uec_restart_tx(uec);
	}
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
		uec_restart_rx(uec);
	}

	return 0;
}

static int uec_stop(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* Check if the UCC number is in range */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
		uec_graceful_stop_tx(uec);
	}

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
		uec_graceful_stop_rx(uec);
	}

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
{
	uec_t *uec_regs;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	return 0;
}

static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode)
{
	enet_interface_e enet_if_mode;
	uec_info_t *uec_info;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (enet_if_mode) {
	case ENET_100_MII:
	case ENET_10_MII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		break;
	case ENET_1000_GMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		break;
	case ENET_1000_TBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_TBIM;
		break;
	case ENET_1000_RTBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= (UPSMR_RPM | UPSMR_TBIM);
		break;
	case ENET_1000_RGMII_RXID:
	case ENET_1000_RGMII_ID:
	case ENET_1000_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_100_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_10_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_RPM | UPSMR_R10M);
		break;
	case ENET_100_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RMM;
		break;
	case ENET_10_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_R10M | UPSMR_RMM);
		break;
	case ENET_1000_SGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_SGMM;
		break;
	default:
		return -EINVAL;
	}
	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
	uint timeout = 0x1000;
	u32 miimcfg = 0;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the bus is free */
	while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--);
	if (timeout <= 0) {
		printf("%s: The MII Bus is stuck!", __FUNCTION__);
		return -ETIMEDOUT;
	}

	return 0;
}

static int init_phy(struct eth_device *dev)
{
	uec_private_t *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (uec_private_t *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
		mii_info->speed = SPEED_1000;
	} else {
		mii_info->speed = SPEED_100;
	}

	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* Get info for this PHY */
	curphy = uec_get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}

static void adjust_link(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;
	uec_t *uec_regs;
	struct uec_mii_info *mii_info = uec->mii_info;

	extern void change_phy_interface_mode(struct eth_device *dev,
					enet_interface_e mode);
	uec_regs = uec->uec_regs;

	if (mii_info->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
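		/*
		 * Only reprogram MACCFG2 when the PHY-reported duplex differs
		 * from the cached value, so the MAC is not rewritten on every
		 * poll of the PHY.
		 */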
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			}
			uec->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != uec->oldspeed) {
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				switch (mii_info->speed) {
				case 1000:
					break;
				case 100:
					printf("switching to rgmii 100\n");
					/* Change the PHY to RGMII 100 */
					change_phy_interface_mode(dev,
								ENET_100_RGMII);
					/* Change the MAC interface mode */
					uec_set_mac_if_mode(uec, ENET_100_RGMII);
					break;
				case 10:
					printf("switching to rgmii 10\n");
					/* Change the PHY to RGMII 10 */
					change_phy_interface_mode(dev,
								ENET_10_RGMII);
					/* Change the MAC interface mode */
					uec_set_mac_if_mode(uec, ENET_10_RGMII);
					break;
				default:
					printf("%s: Ack, speed (%d) is illegal\n",
						dev->name, mii_info->speed);
					break;
				}
			}

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;
		}

		if (!uec->oldlink) {
			printf("%s: Link is up\n", dev->name);
			uec->oldlink = 1;
		}

	} else { /* if (mii_info->link) */
		if (uec->oldlink) {
			printf("%s: Link is down\n", dev->name);
			uec->oldlink = 0;
			uec->oldspeed = 0;
			uec->oldduplex = -1;
		}
	}
}

static void phy_change(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;

	/* Update the link, speed, duplex */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

	/* Adjust the interface according to speed */
	adjust_link(dev);
}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) \
		&& !defined(BITBANGMII)

/*
 * Find a device index from the devlist by name
 *
 * Returns:
 *  The index where the device is located, -1 on error
 */
static int uec_miiphy_find_dev_by_name(char *devname)
{
	int i;

	for (i = 0; i < MAXCONTROLLERS; i++) {
		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) {
			break;
		}
	}

	/* If the device cannot be found, return -1 */
	if (i == MAXCONTROLLERS) {
		debug("%s: device %s not found in devlist\n", __FUNCTION__,
			devname);
		i = -1;
	}

	return i;
}

/*
 * Read a MII PHY register.
 *
 * Returns:
 *  0 on success
 */
static int uec_miiphy_read(char *devname, unsigned char addr,
			    unsigned char reg, unsigned short *value)
{
	int devindex = 0;

	if (devname == NULL || value == NULL) {
		debug("%s: NULL pointer given\n", __FUNCTION__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(devname);
		if (devindex >= 0) {
			*value = uec_read_phy_reg(devlist[devindex], addr, reg);
		}
	}
	return 0;
}

/*
 * Write a MII PHY register.
 *
 * Returns:
 *  0 on success
 */
static int uec_miiphy_write(char *devname, unsigned char addr,
			     unsigned char reg, unsigned short value)
{
	int devindex = 0;

	if (devname == NULL) {
		debug("%s: NULL pointer given\n", __FUNCTION__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(devname);
		if (devindex >= 0) {
			uec_write_phy_reg(devlist[devindex], addr, reg, value);
		}
	}
	return 0;
}
#endif

static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
{
	uec_t *uec_regs;
	u32 mac_addr1;
	u32 mac_addr2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;

	/* For a station address of 0x12345678ABCD, write
	 * 0xCDAB7856 to MACSTNADDR1 and
	 * 0x34120000 to MACSTNADDR2
	 */
	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
			(mac_addr[3] << 8) | (mac_addr[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
				    int *threads_num_ret)
{
	int num_threads_numerica;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerica = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerica = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerica = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerica = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerica = 8;
		break;
	default:
		printf("%s: Bad number of threads value.",
			__FUNCTION__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerica;

	return 0;
}

static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
{
	uec_info_t *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Allocate the global Tx parameter RAM page */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
				sizeof(uec_tx_global_pram_t),
				UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));

	/* Init the global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
				sizeof(uec_send_queue_qd_t),
				UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Set up the table with the TxBD ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
						* SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
				(u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
				end_bd);

	/* Scheduler Base Pointer, we have only one Tx queue, not needed */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, TxRMON disabled, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
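	/*
	 * The 8-bit bus-mode value is shifted into its place within the
	 * 32-bit TSTATE field (BMR_SHIFT); the Rx path writes the same value
	 * directly into the 8-bit RSTATE in uec_init_rx_parameter() below.
	 */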
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
	}

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
	}

	/* TQPTR */
	uec->thread_dat_tx_offset = qe_muram_alloc(
		num_threads_tx * sizeof(uec_thread_data_tx_t) +
		32 * (num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	uec_82xx_address_filtering_pram_t *p_af_pram;

	/* Allocate the global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero the global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* Init the global Rx parameter RAM */
	/* REMODER, extended feature mode disable, VLAN disable,
	 * lossless flow control disable, receive firmware statistics disable,
	 * extended address parsing mode disable, one Rx queue,
	 * dynamic maximum/minimum frame length disable, IP checksum check
	 * disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
			sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
			(u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
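	/* Only one Rx queue is used, so all eight L3 queue table entries are
	 * simply cleared.
	 */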
	for (i = 0; i < 8; i++) {
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear the PQ2 style address filtering hash table */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
					 int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t *p_init_enet_param;
	u32 init_enet_param_offset;
	uec_info_t *uec_info;
	int i;
	int snum;
	u32 init_enet_offset;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate the init enet command parameter */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero the init enet command struct */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init the Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						(u32)uec_info->risc_rx;

	/* Init the Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s: cannot get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i == 0) {
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init the Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->risc_tx;

	/* Init the Tx threads */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s: cannot get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
					UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue the QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
			init_enet_param_offset);

	return 0;
}

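/*
 * uec_startup - bring up one UEC controller
 *
 * Initializes the UCC fast block, programs the MAC and interface mode,
 * allocates and initializes the Tx/Rx buffer descriptor rings and receive
 * buffers, fills in the global Tx/Rx parameter RAM, and finally issues the
 * QE INIT_TX_RX command so the RISC threads pick up the new configuration.
 */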
static int uec_startup(uec_private_t *uec)
{
	uec_info_t *uec_info;
	ucc_fast_info_t *uf_info;
	ucc_fast_private_t *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	enet_interface_e enet_interface;
	u32 length;
	u32 align;
	qe_bd_t *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &(uec_info->uf_info);

	/* Check if the Rx BD ring length is illegal */
	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring length must be a multiple of 4 and at least 8.\n",
			__FUNCTION__);
		return -EINVAL;
	}

	/* Check if the Tx BD ring length is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
			__FUNCTION__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be a multiple of 128.\n",
			__FUNCTION__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __FUNCTION__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
					 &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
					 &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer in the UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Set up the MAC interface mode */
	uec_set_mac_if_mode(uec, uec_info->enet_interface);

	/* Set up the MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *)CONFIG_MIIM_ADDRESS;
#endif

	/* Set up the MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Set up UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
	enet_interface = uec->uec_info->enet_interface;

	/* Initialize the UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for all
	 * UECs. This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/* Configure the TBI for SGMII operation */
	if (uec->uec_info->enet_interface == ENET_1000_SGMII) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_CR, TBICR_SETTINGS);
	}

	/* Allocate the Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
						& ~(align - 1));
	}

	/* Zero all of the Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate the Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
						& ~(align - 1));
	}

	/* Zero all of the Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate the Rx buffers */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
						& ~(align - 1));
	}

	/* Zero all of the Rx buffers */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init the TxBD ring */
	bd = (qe_bd_t *)uec->p_tx_bd_ring;
	uec->txBd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TxBD_WRAP);

	/* Init the RxBD ring */
	bd = (qe_bd_t *)uec->p_rx_bd_ring;
	uec->rxBd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RxBD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);

	/* Init the global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init the global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Issue the init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __FUNCTION__);
		return -ENOMEM;
	}

	return 0;
}

static int uec_init(struct eth_device *dev, bd_t *bd)
{
	uec_private_t *uec;
	int err, i;
	struct phy_info *curphy;

	uec = (uec_private_t *)dev->priv;

	if (uec->the_first_run == 0) {
		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
				dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give the PHY up to 5 seconds to report a link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			udelay(100000);
		} while (((i-- > 0) && !uec->mii_info->link) || err);

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		uec->the_first_run = 1;
	}

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n",
			__FUNCTION__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	phy_change(dev);

	return (uec->mii_info->link ? 0 : -1);
}

static void uec_halt(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;

	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

static int uec_send(struct eth_device *dev, volatile void *buf, int len)
{
	uec_private_t *uec;
	ucc_fast_private_t *uccf;
	volatile qe_bd_t *bd;
	u16 status;
	int i;
	int result = 0;

	uec = (uec_private_t *)dev->priv;
	uccf = uec->uccf;
	bd = uec->txBd;

	/* Find an empty TxBD */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init the TxBD */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = bd->status;
	status &= BD_WRAP;
	status |= (TxBD_READY | TxBD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell the UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for the buffer to be transmitted */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->txBd = bd;
	result = 1;

	return result;
}

static int uec_recv(struct eth_device *dev)
{
	uec_private_t *uec = dev->priv;
	volatile qe_bd_t *bd;
	u16 status;
	u16 len;
	u8 *data;

	bd = uec->rxBd;
	status = bd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RxBD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = bd->status;
	}
	uec->rxBd = bd;

	return 1;
}

int uec_initialize(bd_t *bis, uec_info_t *uec_info)
{
	struct eth_device *dev;
	int i;
	uec_private_t *uec;
	int err;

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* Allocate the UEC private struct */
	uec = (uec_private_t *)malloc(sizeof(uec_private_t));
	if (!uec) {
		return -ENOMEM;
	}
	memset(uec, 0, sizeof(uec_private_t));

	/* Adjust uec_info */
#if (MAX_QE_RISC == 4)
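	/* On parts with four RISC engines, allocate the Ethernet Tx and Rx
	 * threads across all of them.
	 */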
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif

	devlist[uec_info->uf_info.ucc_num] = dev;

	uec->uec_info = uec_info;
	uec->dev = dev;

	sprintf(dev->name, "FSL UEC%d", uec_info->uf_info.ucc_num);
	dev->iobase = 0;
	dev->priv = (void *)uec;
	dev->init = uec_init;
	dev->halt = uec_halt;
	dev->send = uec_send;
	dev->recv = uec_recv;

	/* Clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	eth_register(dev);

	err = uec_startup(uec);
	if (err) {
		printf("%s: Cannot configure net device, aborting.", dev->name);
		return err;
	}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) \
		&& !defined(BITBANGMII)
	miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write);
#endif

	return 1;
}

int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num)
{
	int i;

	for (i = 0; i < num; i++)
		uec_initialize(bis, &uecs[i]);

	return 0;
}

int uec_standard_init(bd_t *bis)
{
	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
}