/*
 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
 * (C) Copyright 2008 Armadeus Systems nc
 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include "fec_mxc.h"

#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Timeout the transfer after 5 ms. This is usually a bit more, since
 * the code in the tight loops this timeout is used in adds some overhead.
 */
#define FEC_XFER_TIMEOUT	5000

#ifndef CONFIG_MII
#error "CONFIG_MII has to be defined!"
#endif

#ifndef CONFIG_FEC_XCV_TYPE
#define CONFIG_FEC_XCV_TYPE	MII100
#endif

/*
 * The i.MX28 operates with packets in big endian. We need to swap them before
 * sending and after receiving.
 */
#ifdef CONFIG_MX28
#define CONFIG_FEC_MXC_SWAP_PACKET
#endif

#define RXDESC_PER_CACHELINE	(ARCH_DMA_MINALIGN/sizeof(struct fec_bd))

/* Check various alignment issues at compile time */
#if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
#error "ARCH_DMA_MINALIGN must be multiple of 16!"
#endif

#if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
#error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
#endif

#undef DEBUG

struct nbuf {
	uint8_t data[1500];	/**< actual data */
	int length;		/**< actual length */
	int used;		/**< buffer in use or not */
	uint8_t head[16];	/**< MAC header(6 + 6 + 2) + 2(aligned) */
};

#ifdef CONFIG_FEC_MXC_SWAP_PACKET
static void swap_packet(uint32_t *packet, int length)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
		packet[i] = __swab32(packet[i]);
}
#endif

/*
 * MII-interface related functions
 */
static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;
	int val;

	/*
	 * reading from any PHY's register is done by properly
	 * programming the FEC's MII data register.
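	 *
	 * The management frame written below consists of the ST (start),
	 * OP (read opcode), PA (PHY address), RA (register address) and
	 * TA (turnaround) fields; the PHY's reply ends up in the low
	 * 16 bits of the same mii_data register.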
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
			phy | reg, &eth->mii_data);

	/*
	 * wait for the related interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Read MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear mii interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);

	/*
	 * it's now safe to read the PHY's register
	 */
	val = (unsigned short)readl(&eth->mii_data);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, val);
	return val;
}

static void fec_mii_setspeed(struct ethernet_regs *eth)
{
	/*
	 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
	 * and do not drop the Preamble.
	 */
	writel((((imx_get_fecclk() / 1000000) + 2) / 5) << 1,
			&eth->mii_speed);
	debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
}

static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr, uint16_t data)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;

	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
		FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);

	/*
	 * wait for the MII interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Write MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear MII interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, data);

	return 0;
}

int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr)
{
	return fec_mdio_read(bus->priv, phyAddr, regAddr);
}

int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr,
		u16 data)
{
	return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
}

#ifndef CONFIG_PHYLIB
static int miiphy_restart_aneg(struct eth_device *dev)
{
	int ret = 0;
#if !defined(CONFIG_FEC_MXC_NO_ANEG)
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wake up from sleep if necessary
	 * Reset PHY, then delay 300ns
	 */
#ifdef CONFIG_MX27
	fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
#endif
	fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
	udelay(1000);

	/*
	 * Set the auto-negotiation advertisement register bits
	 */
	fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
			LPA_100FULL | LPA_100HALF | LPA_10FULL |
			LPA_10HALF | PHY_ANLPAR_PSB_802_3);
	fec_mdio_write(eth, fec->phy_id, MII_BMCR,
			BMCR_ANENABLE | BMCR_ANRESTART);

	if (fec->mii_postcall)
		ret = fec->mii_postcall(fec->phy_id);

#endif
	return ret;
}

static int miiphy_wait_aneg(struct eth_device *dev)
{
	uint32_t start;
	int status;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wait for AN completion
	 */
	start = get_timer(0);
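	/*
	 * BMSR link-up is used below as the indication that auto-negotiation
	 * has finished; give up after CONFIG_SYS_HZ * 5, i.e. roughly five
	 * seconds with the customary millisecond timer tick.
	 */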
	do {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("%s: Autonegotiation timeout\n", dev->name);
			return -1;
		}

		status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
		if (status < 0) {
			printf("%s: Autonegotiation failed. status: %d\n",
					dev->name, status);
			return -1;
		}
	} while (!(status & BMSR_LSTATUS));

	return 0;
}
#endif

static int fec_rx_task_enable(struct fec_priv *fec)
{
	writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
	return 0;
}

static int fec_rx_task_disable(struct fec_priv *fec)
{
	return 0;
}

static int fec_tx_task_enable(struct fec_priv *fec)
{
	writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
	return 0;
}

static int fec_tx_task_disable(struct fec_priv *fec)
{
	return 0;
}

/**
 * Initialize receive task's buffer descriptors
 * @param[in] fec all we know about the device yet
 * @param[in] count receive buffer count to be allocated
 * @param[in] dsize desired size of each receive buffer
 * @return 0 on success
 *
 * For this task we need additional memory for the data buffers. And each
 * data buffer requires some alignment. They must each be aligned to a
 * specific boundary.
 */
static int fec_rbd_init(struct fec_priv *fec, int count, int dsize)
{
	uint32_t size;
	int i;

	/*
	 * Allocate memory for the buffers. This allocation respects the
	 * alignment
	 */
	size = roundup(dsize, ARCH_DMA_MINALIGN);
	for (i = 0; i < count; i++) {
		uint32_t data_ptr = readl(&fec->rbd_base[i].data_pointer);
		if (data_ptr == 0) {
			uint8_t *data = memalign(ARCH_DMA_MINALIGN,
						 size);
			if (!data) {
				printf("%s: error allocating rxbuf %d\n",
						__func__, i);
				goto err;
			}
			writel((uint32_t)data, &fec->rbd_base[i].data_pointer);
		} /* needs allocation */
		writew(FEC_RBD_EMPTY, &fec->rbd_base[i].status);
		writew(0, &fec->rbd_base[i].data_length);
	}

	/* Mark the last RBD to close the ring. */
	writew(FEC_RBD_WRAP | FEC_RBD_EMPTY, &fec->rbd_base[i - 1].status);
	fec->rbd_index = 0;

	return 0;

err:
	for (; i >= 0; i--) {
		uint32_t data_ptr = readl(&fec->rbd_base[i].data_pointer);
		free((void *)data_ptr);
	}

	return -ENOMEM;
}

/**
 * Initialize transmit task's buffer descriptors
 * @param[in] fec all we know about the device yet
 *
 * Transmit buffers are created externally. We only have to init the BDs here.\n
 * Note: There is a race condition in the hardware. When only one BD is in
 * use it must be marked with the WRAP bit to use it for every transmit.
 * This bit in combination with the READY bit results in a double transmit
 * of each data buffer. It seems the state machine checks READY earlier than
 * resetting it after the first transfer.
 * Using two BDs solves this issue.
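 *
 * In this driver only tbd_base[0] ever carries a frame (see fec_send());
 * tbd_base[1] just holds the WRAP flag so the DMA engine stops after the
 * first descriptor.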
 */
static void fec_tbd_init(struct fec_priv *fec)
{
	unsigned addr = (unsigned)fec->tbd_base;
	unsigned size = roundup(2 * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);
	writew(0x0000, &fec->tbd_base[0].status);
	writew(FEC_TBD_WRAP, &fec->tbd_base[1].status);
	fec->tbd_index = 0;
	flush_dcache_range(addr, addr + size);
}

/**
 * Mark the given receive buffer descriptor as free
 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
 * @param[in] pRbd buffer descriptor to mark free again
 */
static void fec_rbd_clean(int last, struct fec_bd *pRbd)
{
	unsigned short flags = FEC_RBD_EMPTY;
	if (last)
		flags |= FEC_RBD_WRAP;
	writew(flags, &pRbd->status);
	writew(0, &pRbd->data_length);
}

static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
						unsigned char *mac)
{
	imx_get_mac_from_fuse(dev_id, mac);
	return !is_valid_ether_addr(mac);
}

static int fec_set_hwaddr(struct eth_device *dev)
{
	uchar *mac = dev->enetaddr;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	writel(0, &fec->eth->iaddr1);
	writel(0, &fec->eth->iaddr2);
	writel(0, &fec->eth->gaddr1);
	writel(0, &fec->eth->gaddr2);

	/*
	 * Set physical address
	 */
	writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
			&fec->eth->paddr1);
	writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);

	return 0;
}

/*
 * Do initial configuration of the FEC registers
 */
static void fec_reg_setup(struct fec_priv *fec)
{
	uint32_t rcntrl;

	/*
	 * Set interrupt mask register
	 */
	writel(0x00000000, &fec->eth->imask);

	/*
	 * Clear FEC-Lite interrupt event register (IEVENT)
	 */
	writel(0xffffffff, &fec->eth->ievent);

	/*
	 * Set FEC-Lite receive control register (R_CNTRL):
	 */

	/* Start with frame length = 1518, common for all modes. */
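	/*
	 * PKTSIZE (1518 bytes) lands in the MAX_FL field of R_CNTRL; the
	 * bits set below then enable flow control and select the
	 * MII/RMII/RGMII interface for the xMII transceiver types, while
	 * SEVENWIRE mode leaves them clear.
	 */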
	rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
	if (fec->xcv_type != SEVENWIRE)		/* xMII modes */
		rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
	if (fec->xcv_type == RGMII)
		rcntrl |= FEC_RCNTRL_RGMII;
	else if (fec->xcv_type == RMII)
		rcntrl |= FEC_RCNTRL_RMII;

	writel(rcntrl, &fec->eth->r_cntrl);
}

/**
 * Start the FEC engine
 * @param[in] dev Our device to handle
 */
static int fec_open(struct eth_device *edev)
{
	struct fec_priv *fec = (struct fec_priv *)edev->priv;
	int speed;
	uint32_t addr, size;
	int i;

	debug("fec_open: fec_open(dev)\n");
	/* full-duplex, heartbeat disabled */
	writel(1 << 2, &fec->eth->x_cntrl);
	fec->rbd_index = 0;

	/* Invalidate all descriptors */
	for (i = 0; i < FEC_RBD_NUM - 1; i++)
		fec_rbd_clean(0, &fec->rbd_base[i]);
	fec_rbd_clean(1, &fec->rbd_base[i]);

	/* Flush the descriptors into RAM */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
			ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->rbd_base;
	flush_dcache_range(addr, addr + size);

#ifdef FEC_QUIRK_ENET_MAC
	/* Enable ENET HW endian SWAP */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
		&fec->eth->ecntrl);
	/* Enable ENET store and forward mode */
	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
		&fec->eth->x_wmrk);
#endif
	/*
	 * Enable FEC-Lite controller
	 */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
		&fec->eth->ecntrl);
#if defined(CONFIG_MX25) || defined(CONFIG_MX53)
	udelay(100);
	/*
	 * setup the MII gasket for RMII mode
	 */

	/* disable the gasket */
	writew(0, &fec->eth->miigsk_enr);

	/* wait for the gasket to be disabled */
	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
		udelay(2);

	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);

	/* re-enable the gasket */
	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);

	/* wait until MII gasket is ready */
	int max_loops = 10;
	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
		if (--max_loops <= 0) {
			printf("WAIT for MII Gasket ready timed out\n");
			break;
		}
	}
#endif

#ifdef CONFIG_PHYLIB
	{
		/* Start up the PHY */
		int ret = phy_startup(fec->phydev);

		if (ret) {
			printf("Could not initialize PHY %s\n",
					fec->phydev->dev->name);
			return ret;
		}
		speed = fec->phydev->speed;
	}
#else
	miiphy_wait_aneg(edev);
	speed = miiphy_speed(edev->name, fec->phy_id);
	miiphy_duplex(edev->name, fec->phy_id);
#endif

#ifdef FEC_QUIRK_ENET_MAC
	{
		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
		if (speed == _1000BASET)
			ecr |= FEC_ECNTRL_SPEED;
		else if (speed != _100BASET)
			rcr |= FEC_RCNTRL_RMII_10T;
		writel(ecr, &fec->eth->ecntrl);
		writel(rcr, &fec->eth->r_cntrl);
	}
#endif
	debug("%s:Speed=%i\n", __func__, speed);

	/*
	 * Enable SmartDMA receive task
	 */
	fec_rx_task_enable(fec);

	udelay(100000);
	return 0;
}

static int fec_init(struct eth_device *dev, bd_t *bd)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
	uint32_t size;
	int i, ret;

	/* Initialize MAC address */
	fec_set_hwaddr(dev);

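	/*
	 * The descriptor rings below are allocated only once and then
	 * reused; the error recovery path in fec_recv() calls fec_halt()
	 * followed by fec_init() again with the rings still in place.
	 */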
	/*
	 * Allocate transmit descriptors, there are two in total. This
	 * allocation respects cache alignment.
	 */
	if (!fec->tbd_base) {
		size = roundup(2 * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);
		fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
		if (!fec->tbd_base) {
			ret = -ENOMEM;
			goto err1;
		}
		memset(fec->tbd_base, 0, size);
		fec_tbd_init(fec);
	}

	/*
	 * Allocate receive descriptors. This allocation respects cache
	 * alignment.
	 */
	if (!fec->rbd_base) {
		size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);
		fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
		if (!fec->rbd_base) {
			ret = -ENOMEM;
			goto err2;
		}
		memset(fec->rbd_base, 0, size);
		/*
		 * Initialize RxBD ring
		 */
		if (fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE) < 0) {
			ret = -ENOMEM;
			goto err3;
		}
		flush_dcache_range((unsigned)fec->rbd_base,
				   (unsigned)fec->rbd_base + size);
	}

	fec_reg_setup(fec);

	if (fec->xcv_type != SEVENWIRE)
		fec_mii_setspeed(fec->bus->priv);

	/*
	 * Set Opcode/Pause Duration Register
	 */
	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
	writel(0x2, &fec->eth->x_wmrk);
	/*
	 * Set multicast address filter
	 */
	writel(0x00000000, &fec->eth->gaddr1);
	writel(0x00000000, &fec->eth->gaddr2);

	/* clear MIB RAM */
	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
		writel(0, i);

	/* FIFO receive start register */
	writel(0x520, &fec->eth->r_fstart);

	/* size and address of each buffer */
	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);

#ifndef CONFIG_PHYLIB
	if (fec->xcv_type != SEVENWIRE)
		miiphy_restart_aneg(dev);
#endif
	fec_open(dev);
	return 0;

err3:
	free(fec->rbd_base);
err2:
	free(fec->tbd_base);
err1:
	return ret;
}

/**
 * Halt the FEC engine
 * @param[in] dev Our device to handle
 */
static void fec_halt(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	int counter = 0xffff;

	/*
	 * issue graceful stop command to the FEC transmitter if necessary
	 */
	writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
			&fec->eth->x_cntrl);

	debug("eth_halt: wait for stop regs\n");
	/*
	 * wait for graceful stop to register
	 */
	while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
		udelay(1);

	/*
	 * Disable SmartDMA tasks
	 */
	fec_tx_task_disable(fec);
	fec_rx_task_disable(fec);

	/*
	 * Disable the Ethernet Controller
	 * Note: this will also reset the BD index counter!
	 */
	writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
			&fec->eth->ecntrl);
	fec->rbd_index = 0;
	fec->tbd_index = 0;
	debug("eth_halt: done\n");
}

/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame. This routine only accepts
	 * 6-byte Ethernet addresses.
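	 *
	 * The sequence below is: flush the payload to RAM, fill the active
	 * TX descriptor, mark it READY|LAST|TC, flush both descriptors and
	 * read them back to serialize the bus writes, then trigger the
	 * transmit task and poll until the controller has taken the frame.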
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the DMA
	 * engine. We also flush the packet to RAM here to avoid cache trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Below we read the DMA descriptor's last four bytes back from the
	 * DRAM. This is important in order to make sure that all WRITE
	 * operations on the bus that were triggered by previous cache FLUSH
	 * have completed.
	 *
	 * Otherwise, on MX28, it is possible to observe a corruption of the
	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
	 * for the bus structure of MX28. The scenario is as follows:
	 *
	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
	 *    to DRAM due to flush_dcache_range()
	 * 2) ARM core writes the FEC registers via AHB_ARB2
	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
	 *
	 * Note that 2) does sometimes finish before 1) due to reordering of
	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
	 * DMA descriptor is fully written into DRAM. This results in
	 * occasional corruption of the DMA descriptor.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent. On each turn of the wait cycle, we must
	 * invalidate data cache to see what's really in RAM. Also, we need
	 * a barrier here.
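	 *
	 * The loop below polls the TDAR bit, which the controller clears
	 * once it has processed the active transmit descriptor; only after
	 * that do we invalidate the descriptor area and re-check the READY
	 * bit to confirm the frame really left the ring.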
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

	invalidate_dcache_range(addr, addr + size);
	if (readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_READY)
		ret = -EINVAL;

	debug("fec_send: status 0x%x index %d ret %i\n",
			readw(&fec->tbd_base[fec->tbd_index].status),
			fec->tbd_index, ret);
	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}

/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	struct nbuf *frame;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	uchar buff[FEC_MAX_PKT_SIZE] __aligned(ARCH_DMA_MINALIGN);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
				&fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
					&fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status. Before the status can be read, the data cache
	 * must be invalidated, because the data in RAM might have been changed
	 * by DMA. The descriptors are properly aligned to cachelines so there's
	 * no need to worry they'd overlap.
	 *
	 * WARNING: By invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one. Therefore we can NOT change the
	 * contents of this descriptor nor the surrounding ones. The problem is
	 * that in order to mark the descriptor as processed, we need to change
	 * the descriptor. The solution is to mark the whole cache line when all
	 * descriptors in the cache line are processed.
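	 *
	 * For example, with a 32-byte cache line and 8-byte descriptors,
	 * RXDESC_PER_CACHELINE is 4, so descriptors are handed back to the
	 * hardware in groups of four once the last one of a group has been
	 * processed (see the rbd_index handling further down).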
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
			((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			frame = (struct nbuf *)readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			addr = (uint32_t)frame;
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);

			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)frame->data, frame_length);
#endif
			memcpy(buff, frame->data, frame_length);
			NetReceive(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08lx 0x%08x\n",
						(ulong)rbd->data_pointer,
						bd_status);
		}

		/*
		 * Free the current buffer, restart the engine and move forward
		 * to the next buffer. Here we check if the whole cacheline of
		 * descriptors was already processed and if so, we mark it free
		 * as a whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					&fec->rbd_base[i]);
			}
			flush_dcache_range(addr,
					   addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}

static void fec_set_dev_name(char *dest, int dev_id)
{
	sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
}

#ifdef CONFIG_PHYLIB
int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, struct phy_device *phydev)
#else
static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, int phy_id)
#endif
{
	struct eth_device *edev;
	struct fec_priv *fec;
	unsigned char ethaddr[6];
	uint32_t start;
	int ret = 0;

	/* create and fill edev struct */
	edev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!edev) {
		puts("fec_mxc: not enough malloc memory for eth_device\n");
		ret = -ENOMEM;
		goto err1;
	}

	fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
	if (!fec) {
		puts("fec_mxc: not enough malloc memory for fec_priv\n");
		ret = -ENOMEM;
		goto err2;
	}

	memset(edev, 0, sizeof(*edev));
	memset(fec, 0, sizeof(*fec));

	edev->priv = fec;
	edev->init = fec_init;
	edev->send = fec_send;
	edev->recv = fec_recv;
	edev->halt = fec_halt;
	edev->write_hwaddr = fec_set_hwaddr;

	fec->eth = (struct ethernet_regs *)base_addr;
	fec->bd = bd;

	fec->xcv_type = CONFIG_FEC_XCV_TYPE;

	/* Reset chip. */
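	/*
	 * FEC_ECNTRL_RESET is self-clearing: the controller drops the bit
	 * once its internal reset has completed, which is what the loop
	 * below waits for (with a generous five second timeout).
	 */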
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
	start = get_timer(0);
	while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("FEC MXC: Timeout resetting chip\n");
			goto err3;
		}
		udelay(10);
	}

	fec_reg_setup(fec);
	fec_set_dev_name(edev->name, dev_id);
	fec->dev_id = (dev_id == -1) ? 0 : dev_id;
	fec->bus = bus;
	fec_mii_setspeed(bus->priv);
#ifdef CONFIG_PHYLIB
	fec->phydev = phydev;
	phy_connect_dev(phydev, edev);
	/* Configure phy */
	phy_config(phydev);
#else
	fec->phy_id = phy_id;
#endif
	eth_register(edev);

	if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
		debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
		memcpy(edev->enetaddr, ethaddr, 6);
	}
	return ret;
err3:
	free(fec);
err2:
	free(edev);
err1:
	return ret;
}

struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
{
	struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
	struct mii_dev *bus;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return NULL;
	}
	bus->read = fec_phy_read;
	bus->write = fec_phy_write;
	bus->priv = eth;
	fec_set_dev_name(bus->name, dev_id);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return NULL;
	}
	fec_mii_setspeed(eth);
	return bus;
}

int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
{
	uint32_t base_mii;
	struct mii_dev *bus = NULL;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev = NULL;
#endif
	int ret;

#ifdef CONFIG_MX28
	/*
	 * The i.MX28 has two Ethernet interfaces, but they are not equal.
	 * Only the first one can access the MDIO bus.
	 */
	base_mii = MXS_ENET0_BASE;
#else
	base_mii = addr;
#endif
	debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
	bus = fec_get_miibus(base_mii, dev_id);
	if (!bus)
		return -ENOMEM;
#ifdef CONFIG_PHYLIB
	phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		free(bus);
		return -ENOMEM;
	}
	ret = fec_probe(bd, dev_id, addr, bus, phydev);
#else
	ret = fec_probe(bd, dev_id, addr, bus, phy_id);
#endif
	if (ret) {
#ifdef CONFIG_PHYLIB
		free(phydev);
#endif
		free(bus);
	}
	return ret;
}

#ifdef CONFIG_FEC_MXC_PHYADDR
int fecmxc_initialize(bd_t *bd)
{
	return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
			IMX_FEC_BASE);
}
#endif

#ifndef CONFIG_PHYLIB
int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	fec->mii_postcall = cb;
	return 0;
}
#endif
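
/*
 * Typical board-level usage, as an illustrative sketch only: the hook name
 * below is the generic U-Boot one, and the dev_id/PHY address values are
 * board-specific assumptions, not something this driver mandates.
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return fecmxc_initialize_multi(bis, 0, CONFIG_FEC_MXC_PHYADDR,
 *					       IMX_FEC_BASE);
 *	}
 */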