/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include "common.h"
#include "net.h"
#include "malloc.h"
#include "asm/errno.h"
#include "asm/io.h"
#include "asm/immap_qe.h"
#include "qe.h"
#include "uccf.h"
#include "uec.h"
#include "uec_phy.h"
#include "miiphy.h"

static uec_info_t uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};

#define MAXCONTROLLERS	(8)

static struct eth_device *devlist[MAXCONTROLLERS];

u16 phy_read(struct uec_mii_info *mii_info, u16 regnum);
void phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val);

static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

static int uec_graceful_stop_tx(uec_private_t *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the graceful stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(uec_private_t *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: No init rx global parameter\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
				(u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_restart_tx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

static int uec_open(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* Check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
		uec_restart_tx(uec);
	}
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
		uec_restart_rx(uec);
	}

	return 0;
}

static int uec_stop(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* Check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
		uec_graceful_stop_tx(uec);
	}

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
		uec_graceful_stop_rx(uec);
	}

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
{
	uec_t *uec_regs;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	return 0;
}

static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode)
{
	enet_interface_e enet_if_mode;
	uec_info_t *uec_info;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (enet_if_mode) {
	case ENET_100_MII:
	case ENET_10_MII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		break;
	case ENET_1000_GMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		break;
	case ENET_1000_TBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_TBIM;
		break;
	case ENET_1000_RTBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= (UPSMR_RPM | UPSMR_TBIM);
		break;
	case ENET_1000_RGMII_RXID:
	case ENET_1000_RGMII_ID:
	case ENET_1000_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_100_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_10_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_RPM | UPSMR_R10M);
		break;
	case ENET_100_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RMM;
		break;
	case ENET_10_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_R10M | UPSMR_RMM);
		break;
	case ENET_1000_SGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_SGMM;
		break;
	default:
		return -EINVAL;
	}
	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
	int timeout = 0x1000;
	u32 miimcfg = 0;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the MII management bus is free */
	while ((in_be32(&uec_mii_regs->miimind) & MIIMIND_BUSY) && timeout--)
		;
	if (timeout <= 0) {
		printf("%s: The MII Bus is stuck!", __FUNCTION__);
		return -ETIMEDOUT;
	}

	return 0;
}

static int init_phy(struct eth_device *dev)
{
	uec_private_t *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (uec_private_t *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
		mii_info->speed = SPEED_1000;
	} else {
		mii_info->speed = SPEED_100;
	}

	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* Get info for this PHY */
	curphy = uec_get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}

static void adjust_link(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;
	uec_t *uec_regs;
	struct uec_mii_info *mii_info = uec->mii_info;

	extern void change_phy_interface_mode(struct eth_device *dev,
						enet_interface_e mode);
	uec_regs = uec->uec_regs;

	if (mii_info->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			}
			uec->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != uec->oldspeed) {
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				switch (mii_info->speed) {
				case 1000:
					break;
				case 100:
					printf("switching to rgmii 100\n");
					/* change phy to rgmii 100 */
					change_phy_interface_mode(dev,
								ENET_100_RGMII);
					/* change the MAC interface mode */
					uec_set_mac_if_mode(uec, ENET_100_RGMII);
					break;
				case 10:
					printf("switching to rgmii 10\n");
					/* change phy to rgmii 10 */
					change_phy_interface_mode(dev,
								ENET_10_RGMII);
					/* change the MAC interface mode */
					uec_set_mac_if_mode(uec, ENET_10_RGMII);
					break;
				default:
					printf("%s: Ack, speed (%d) is illegal\n",
						dev->name, mii_info->speed);
					break;
				}
			}

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;
		}

		if (!uec->oldlink) {
			printf("%s: Link is up\n", dev->name);
			uec->oldlink = 1;
		}

	} else { /* if (mii_info->link) */
		if (uec->oldlink) {
			printf("%s: Link is down\n", dev->name);
			uec->oldlink = 0;
			uec->oldspeed = 0;
			uec->oldduplex = -1;
		}
	}
}

static void phy_change(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;

	/* Update the link, speed, duplex */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

	/* Adjust the interface according to speed */
	adjust_link(dev);
}

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
		&& !defined(BITBANGMII)

/*
 * Find a device index in the devlist by name
 *
 * Returns:
 *   the index where the device is located, -1 on error
 */
static int uec_miiphy_find_dev_by_name(char *devname)
{
	int i;

	for (i = 0; i < MAXCONTROLLERS; i++) {
		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) {
			break;
		}
	}

	/* If the device cannot be found, return -1 */
	if (i == MAXCONTROLLERS) {
		debug("%s: device %s not found in devlist\n",
			__FUNCTION__, devname);
		i = -1;
	}

	return i;
}

/*
 * Read a MII PHY register.
 *
 * Returns:
 *   0 on success
 */
static int uec_miiphy_read(char *devname, unsigned char addr,
			    unsigned char reg, unsigned short *value)
{
	int devindex = 0;

	if (devname == NULL || value == NULL) {
		debug("%s: NULL pointer given\n", __FUNCTION__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(devname);
		if (devindex >= 0) {
			*value = uec_read_phy_reg(devlist[devindex], addr, reg);
		}
	}
	return 0;
}

/*
 * Write a MII PHY register.
 *
 * Returns:
 *   0 on success
 */
static int uec_miiphy_write(char *devname, unsigned char addr,
			     unsigned char reg, unsigned short value)
{
	int devindex = 0;

	if (devname == NULL) {
		debug("%s: NULL pointer given\n", __FUNCTION__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(devname);
		if (devindex >= 0) {
			uec_write_phy_reg(devlist[devindex], addr, reg, value);
		}
	}
	return 0;
}
#endif

static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
{
	uec_t *uec_regs;
	u32 mac_addr1;
	u32 mac_addr2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;

	/* For a station address of 0x12345678ABCD, perform a write to
	   MACSTNADDR1 of 0xCDAB7856,
	   MACSTNADDR2 of 0x34120000 */

	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
			(mac_addr[3] << 8) | (mac_addr[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
				    int *threads_num_ret)
{
	int num_threads_numerical;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerical = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerical = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerical = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerical = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerical = 8;
		break;
	default:
		printf("%s: Bad number of threads value.",
				__FUNCTION__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerical;

	return 0;
}

static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
{
	uec_info_t *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Allocate global Tx parameter RAM page */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
				sizeof(uec_tx_global_pram_t),
				UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
				sizeof(uec_send_queue_qd_t),
				UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Set up the table with the TxBD ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
						 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
				 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
				 end_bd);

	/* Scheduler Base Pointer: we have only one Tx queue, so it is unused */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer: TxRMON is disabled, so it is unused */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
	}

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
	}

	/* TQPTR */
	uec->thread_dat_tx_offset = qe_muram_alloc(
		num_threads_tx * sizeof(uec_thread_data_tx_t) +
		 32 * (num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	uec_82xx_address_filtering_pram_t *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero the global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* Init global Rx parameter RAM */
	/* REMODER: extended feature mode disabled, VLAN disabled,
	   lossless flow control disabled, receive firmware statistics disabled,
	   extended address parsing mode disabled, one Rx queue,
	   dynamic maximum/minimum frame length disabled, IP checksum check
	   disabled, IP address alignment disabled
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			 UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer: RxRMON is disabled, so it is unused */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR: unused, no interrupts */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
			sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++) {
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
					 int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t *p_init_enet_param;
	u32 init_enet_param_offset;
	uec_info_t *uec_info;
	int i;
	int snum;
	u32 init_enet_offset;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						 (u32)uec_info->risc_rx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i == 0) {
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					 UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
					 UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
						 init_enet_param_offset);

	return 0;
}

static int uec_startup(uec_private_t *uec)
{
	uec_info_t *uec_info;
	ucc_fast_info_t *uf_info;
	ucc_fast_private_t *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	enet_interface_e enet_interface;
	u32 length;
	u32 align;
	qe_bd_t *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &(uec_info->uf_info);

	/* Check if the Rx BD ring length is illegal */
	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be a multiple of 4, and > 8.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check if the Tx BD ring length is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be a multiple of 128.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __FUNCTION__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
					 &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
					 &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to the UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup MAC interface mode */
	uec_set_mac_if_mode(uec, uec_info->enet_interface);

	/* Setup MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
#endif

	/* Setup MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
	enet_interface = uec->uec_info->enet_interface;
	if (enet_interface == ENET_1000_TBI ||
		 enet_interface == ENET_1000_RTBI) {
		utbipar |= (uec_info->phy_address + uec_info->uf_info.ucc_num)
						 << UTBIPAR_PHY_ADDRESS_SHIFT;
	} else {
		utbipar |= (0x10 + uec_info->uf_info.ucc_num)
						 << UTBIPAR_PHY_ADDRESS_SHIFT;
	}

	out_be32(&uec_regs->utbipar, utbipar);

	/* Configure the TBI for SGMII operation */
	if (uec->uec_info->enet_interface == ENET_1000_SGMII) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_CR, TBICR_SETTINGS);
	}

	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
						& ~(align - 1));
	}

	/* Zero all of the Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
						& ~(align - 1));
	}

	/* Zero all of the Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffer */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
						& ~(align - 1));
	}

	/* Zero all of the Rx buffer */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring */
	bd = (qe_bd_t *)uec->p_tx_bd_ring;
	uec->txBd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TxBD_WRAP);

	/* Init RxBD ring */
	bd = (qe_bd_t *)uec->p_rx_bd_ring;
	uec->rxBd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RxBD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __FUNCTION__);
		return -ENOMEM;
	}

	return 0;
}

static int uec_init(struct eth_device *dev, bd_t *bd)
{
	uec_private_t *uec;
	int err, i;
	struct phy_info *curphy;

	uec = (uec_private_t *)dev->priv;

	if (uec->the_first_run == 0) {
		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
				 dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give PHYs up to 5 sec to report a link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			udelay(100000);
		} while (((i-- > 0) && !uec->mii_info->link) || err);

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		uec->the_first_run = 1;
	}

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n",
			 __FUNCTION__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	phy_change(dev);

	return (uec->mii_info->link ? 0 : -1);
}

static void uec_halt(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;

	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

static int uec_send(struct eth_device *dev, volatile void *buf, int len)
{
	uec_private_t *uec;
	ucc_fast_private_t *uccf;
	volatile qe_bd_t *bd;
	u16 status;
	int i;
	int result = 0;

	uec = (uec_private_t *)dev->priv;
	uccf = uec->uccf;
	bd = uec->txBd;

	/* Find an empty TxBD */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = bd->status;
	status &= BD_WRAP;
	status |= (TxBD_READY | TxBD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell the UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for the buffer to be transmitted */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->txBd = bd;
	result = 1;

	return result;
}

static int uec_recv(struct eth_device *dev)
{
	uec_private_t *uec = dev->priv;
	volatile qe_bd_t *bd;
	u16 status;
	u16 len;
	u8 *data;

	bd = uec->rxBd;
	status = bd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RxBD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = bd->status;
	}
	uec->rxBd = bd;

	return 1;
}

int uec_initialize(bd_t *bis, uec_info_t *uec_info)
{
	struct eth_device *dev;
	int i;
	uec_private_t *uec;
	int err;

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;
	memset(dev, 0, sizeof(struct eth_device));

	/* Allocate the UEC private struct */
	uec = (uec_private_t *)malloc(sizeof(uec_private_t));
	if (!uec) {
		free(dev);
		return -ENOMEM;
	}
	memset(uec, 0, sizeof(uec_private_t));

	/* Adjust uec_info */
#if (MAX_QE_RISC == 4)
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif

	devlist[uec_info->uf_info.ucc_num] = dev;

	uec->uec_info = uec_info;
	uec->dev = dev;

	sprintf(dev->name, "FSL UEC%d", uec_info->uf_info.ucc_num);
	dev->iobase = 0;
	dev->priv = (void *)uec;
	dev->init = uec_init;
	dev->halt = uec_halt;
	dev->send = uec_send;
	dev->recv = uec_recv;

	/* Clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	eth_register(dev);

	err = uec_startup(uec);
	if (err) {
		printf("%s: Cannot configure net device, aborting.", dev->name);
		return err;
	}

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
		&& !defined(BITBANGMII)
	miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write);
#endif

	return 1;
}

int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num)
{
	int i;

	for (i = 0; i < num; i++)
		uec_initialize(bis, &uecs[i]);

	return 0;
}

int uec_standard_init(bd_t *bis)
{
	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
}
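
/*
 * Example usage (illustrative only, not part of this driver): a board port
 * typically brings these controllers up by calling uec_standard_init(),
 * which registers one eth_device per CONFIG_UEC_ETHn entry in uec_info[]
 * above. The board_eth_init() hook shown below is the usual place for this
 * call, but the exact hook and wiring are board-specific assumptions, not
 * something this file defines:
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return uec_standard_init(bis);
 *	}
 */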