/*
 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
 * (C) Copyright 2008 Armadeus Systems nc
 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include "fec_mxc.h"

#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Timeout the transfer after 5 ms. The effective timeout is usually a bit
 * longer, since the tight loops this timeout is used in add some overhead.
 */
#define FEC_XFER_TIMEOUT	5000

/*
 * The standard 32-byte DMA alignment does not work on mx6solox, which requires
 * 64-byte alignment in the DMA RX FEC buffer.
 * Introduce FEC_DMA_RX_MINALIGN, which covers the mx6solox needs and also
 * satisfies the 32-byte alignment requirement of the other SoCs.
 */
#define FEC_DMA_RX_MINALIGN	64

#ifndef CONFIG_MII
#error "CONFIG_MII has to be defined!"
#endif

#ifndef CONFIG_FEC_XCV_TYPE
#define CONFIG_FEC_XCV_TYPE	MII100
#endif

/*
 * The i.MX28 operates with packets in big endian. We need to swap them before
 * sending and after receiving.
 */
#ifdef CONFIG_MX28
#define CONFIG_FEC_MXC_SWAP_PACKET
#endif

/* Number of RX descriptors sharing one cache line; see fec_recv(). */
#define RXDESC_PER_CACHELINE	(ARCH_DMA_MINALIGN / sizeof(struct fec_bd))

/* Check various alignment issues at compile time */
#if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
#error "ARCH_DMA_MINALIGN must be multiple of 16!"
#endif

#if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
#error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
#endif

#undef DEBUG

struct nbuf {
	uint8_t data[1500];	/**< actual data */
	int length;		/**< actual length */
	int used;		/**< buffer in use or not */
	uint8_t head[16];	/**< MAC header(6 + 6 + 2) + 2(aligned) */
};

#ifdef CONFIG_FEC_MXC_SWAP_PACKET
static void swap_packet(uint32_t *packet, int length)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
		packet[i] = __swab32(packet[i]);
}
#endif

/*
 * MII-interface related functions
 */
static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;
	int val;

	/*
	 * reading from any PHY's register is done by properly
	 * programming the FEC's MII data register.
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
			phy | reg, &eth->mii_data);

	/*
	 * wait for the related interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Read MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear mii interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);

	/*
	 * it's now safe to read the PHY's register
	 */
	val = (unsigned short)readl(&eth->mii_data);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, val);
	return val;
}

static void fec_mii_setspeed(struct ethernet_regs *eth)
{
	/*
	 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
	 * and do not drop the Preamble.
	 *
	 * The divider keeps the resulting MDC clock at or below 2.5 MHz;
	 * e.g. with a 66 MHz FEC clock, DIV_ROUND_UP() yields 14, so
	 * MDC = 66 MHz / (14 * 2), roughly 2.36 MHz.
	 */
	register u32 speed = DIV_ROUND_UP(imx_get_fecclk(), 5000000);
#ifdef FEC_QUIRK_ENET_MAC
	speed--;
#endif
	speed <<= 1;
	writel(speed, &eth->mii_speed);
	debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
}

static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr, uint16_t data)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;

	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
		FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);

	/*
	 * wait for the MII interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Write MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear MII interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, data);

	return 0;
}

static int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr,
		int regAddr)
{
	return fec_mdio_read(bus->priv, phyAddr, regAddr);
}

static int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr,
		int regAddr, u16 data)
{
	return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
}

#ifndef CONFIG_PHYLIB
static int miiphy_restart_aneg(struct eth_device *dev)
{
	int ret = 0;
#if !defined(CONFIG_FEC_MXC_NO_ANEG)
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wake up from sleep if necessary
	 * Reset PHY, then delay 300ns
	 */
#ifdef CONFIG_MX27
	fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
#endif
	fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
	udelay(1000);

	/*
	 * Set the auto-negotiation advertisement register bits
	 */
	fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
			LPA_100FULL | LPA_100HALF | LPA_10FULL |
			LPA_10HALF | PHY_ANLPAR_PSB_802_3);
	fec_mdio_write(eth, fec->phy_id, MII_BMCR,
			BMCR_ANENABLE | BMCR_ANRESTART);

	if (fec->mii_postcall)
		ret = fec->mii_postcall(fec->phy_id);

#endif
	return ret;
}

static int miiphy_wait_aneg(struct eth_device *dev)
{
	uint32_t start;
	int status;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

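	/*
	 * Note: the loop below polls BMSR_LSTATUS (link up) rather than
	 * BMSR_ANEGCOMPLETE; on most PHYs the link is only reported up
	 * once autonegotiation has finished.
	 */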
	/*
	 * Wait for AN completion
	 */
	start = get_timer(0);
	do {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("%s: Autonegotiation timeout\n", dev->name);
			return -1;
		}

		status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
		if (status < 0) {
			printf("%s: Autonegotiation failed. status: %d\n",
					dev->name, status);
			return -1;
		}
	} while (!(status & BMSR_LSTATUS));

	return 0;
}
#endif

static int fec_rx_task_enable(struct fec_priv *fec)
{
	writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
	return 0;
}

static int fec_rx_task_disable(struct fec_priv *fec)
{
	return 0;
}

static int fec_tx_task_enable(struct fec_priv *fec)
{
	writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
	return 0;
}

static int fec_tx_task_disable(struct fec_priv *fec)
{
	return 0;
}

/**
 * Initialize receive task's buffer descriptors
 * @param[in] fec all we know about the device yet
 * @param[in] count receive buffer count to be allocated
 * @param[in] dsize desired size of each receive buffer
 * @return 0 on success
 *
 * Init all RX descriptors to default values.
 */
static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
{
	uint32_t size;
	uint8_t *data;
	int i;

	/*
	 * Reload the RX descriptors with default values and wipe
	 * the RX buffers.
	 */
	size = roundup(dsize, ARCH_DMA_MINALIGN);
	for (i = 0; i < count; i++) {
		data = (uint8_t *)fec->rbd_base[i].data_pointer;
		memset(data, 0, dsize);
		flush_dcache_range((uint32_t)data, (uint32_t)data + size);

		fec->rbd_base[i].status = FEC_RBD_EMPTY;
		fec->rbd_base[i].data_length = 0;
	}

	/* Mark the last RBD to close the ring. */
	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
	fec->rbd_index = 0;

	flush_dcache_range((unsigned)fec->rbd_base,
			   (unsigned)fec->rbd_base + size);
}

/**
 * Initialize transmit task's buffer descriptors
 * @param[in] fec all we know about the device yet
 *
 * Transmit buffers are created externally. We only have to init the BDs here.
 * Note: There is a race condition in the hardware. When only one BD is in
 * use it must be marked with the WRAP bit to use it for every transmit.
 * This bit in combination with the READY bit results in double transmission
 * of each data buffer. It seems the state machine checks READY earlier than
 * resetting it after the first transfer.
 * Using two BDs solves this issue.
 */
static void fec_tbd_init(struct fec_priv *fec)
{
	unsigned addr = (unsigned)fec->tbd_base;
	unsigned size = roundup(2 * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);

	memset(fec->tbd_base, 0, size);
	fec->tbd_base[0].status = 0;
	fec->tbd_base[1].status = FEC_TBD_WRAP;
	fec->tbd_index = 0;
	flush_dcache_range(addr, addr + size);
}

/**
 * Mark the given receive buffer descriptor as free
 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
 * @param[in] pRbd buffer descriptor to mark free again
 */
static void fec_rbd_clean(int last, struct fec_bd *pRbd)
{
	unsigned short flags = FEC_RBD_EMPTY;
	if (last)
		flags |= FEC_RBD_WRAP;
	writew(flags, &pRbd->status);
	writew(0, &pRbd->data_length);
}

static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
						unsigned char *mac)
{
	imx_get_mac_from_fuse(dev_id, mac);
	return !is_valid_ethaddr(mac);
}

static int fec_set_hwaddr(struct eth_device *dev)
{
	uchar *mac = dev->enetaddr;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	writel(0, &fec->eth->iaddr1);
	writel(0, &fec->eth->iaddr2);
	writel(0, &fec->eth->gaddr1);
	writel(0, &fec->eth->gaddr2);

	/*
	 * Set physical address
	 */
	writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
			&fec->eth->paddr1);
	writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);

	return 0;
}

/*
 * Do initial configuration of the FEC registers
 */
static void fec_reg_setup(struct fec_priv *fec)
{
	uint32_t rcntrl;

	/*
	 * Set interrupt mask register
	 */
	writel(0x00000000, &fec->eth->imask);

	/*
	 * Clear FEC-Lite interrupt event register(IEVENT)
	 */
	writel(0xffffffff, &fec->eth->ievent);

	/*
	 * Set FEC-Lite receive control register(R_CNTRL):
	 */

	/* Start with frame length = 1518, common for all modes. */
	rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
	if (fec->xcv_type != SEVENWIRE)		/* xMII modes */
		rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
	if (fec->xcv_type == RGMII)
		rcntrl |= FEC_RCNTRL_RGMII;
	else if (fec->xcv_type == RMII)
		rcntrl |= FEC_RCNTRL_RMII;

	writel(rcntrl, &fec->eth->r_cntrl);
}

/**
 * Start the FEC engine
 * @param[in] edev Our device to handle
 */
static int fec_open(struct eth_device *edev)
{
	struct fec_priv *fec = (struct fec_priv *)edev->priv;
	int speed;
	uint32_t addr, size;
	int i;

	debug("fec_open: fec_open(dev)\n");
	/* full-duplex, heartbeat disabled */
	writel(1 << 2, &fec->eth->x_cntrl);
	fec->rbd_index = 0;

	/* Invalidate all descriptors */
	for (i = 0; i < FEC_RBD_NUM - 1; i++)
		fec_rbd_clean(0, &fec->rbd_base[i]);
	fec_rbd_clean(1, &fec->rbd_base[i]);

	/* Flush the descriptors into RAM */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
			ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->rbd_base;
	flush_dcache_range(addr, addr + size);

#ifdef FEC_QUIRK_ENET_MAC
	/* Enable ENET HW endian SWAP */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
	       &fec->eth->ecntrl);
	/* Enable ENET store and forward mode */
	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
	       &fec->eth->x_wmrk);
#endif
	/*
	 * Enable FEC-Lite controller
	 */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
	       &fec->eth->ecntrl);
#if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
	udelay(100);
	/*
	 * setup the MII gasket for RMII mode
	 */

	/* disable the gasket */
	writew(0, &fec->eth->miigsk_enr);

	/* wait for the gasket to be disabled */
	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
		udelay(2);

	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);

	/* re-enable the gasket */
	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);

	/* wait until MII gasket is ready */
	int max_loops = 10;
	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
		if (--max_loops <= 0) {
			printf("WAIT for MII Gasket ready timed out\n");
			break;
		}
	}
#endif

#ifdef CONFIG_PHYLIB
	{
		/* Start up the PHY */
		int ret = phy_startup(fec->phydev);

		if (ret) {
			printf("Could not initialize PHY %s\n",
			       fec->phydev->dev->name);
			return ret;
		}
		speed = fec->phydev->speed;
	}
#else
	miiphy_wait_aneg(edev);
	speed = miiphy_speed(edev->name, fec->phy_id);
	miiphy_duplex(edev->name, fec->phy_id);
#endif

#ifdef FEC_QUIRK_ENET_MAC
	{
		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
		if (speed == _1000BASET)
			ecr |= FEC_ECNTRL_SPEED;
		else if (speed != _100BASET)
			rcr |= FEC_RCNTRL_RMII_10T;
		writel(ecr, &fec->eth->ecntrl);
		writel(rcr, &fec->eth->r_cntrl);
	}
#endif
	debug("%s:Speed=%i\n", __func__, speed);

	/*
	 * Enable SmartDMA receive task
	 */
	fec_rx_task_enable(fec);

	udelay(100000);
	return 0;
}

static int fec_init(struct eth_device *dev, bd_t *bd)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
	int i;

	/* Initialize MAC address */
	fec_set_hwaddr(dev);

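	/*
	 * The descriptor rings and MAC registers are programmed below;
	 * fec_open(), called at the end of this function, then brings up
	 * the link and enables the receiver.
	 */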
	/*
	 * Setup transmit descriptors, there are two in total.
	 */
	fec_tbd_init(fec);

	/* Setup receive descriptors. */
	fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);

	fec_reg_setup(fec);

	if (fec->xcv_type != SEVENWIRE)
		fec_mii_setspeed(fec->bus->priv);

	/*
	 * Set Opcode/Pause Duration Register
	 */
	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
	writel(0x2, &fec->eth->x_wmrk);
	/*
	 * Set multicast address filter
	 */
	writel(0x00000000, &fec->eth->gaddr1);
	writel(0x00000000, &fec->eth->gaddr2);

	/* clear MIB RAM */
	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
		writel(0, i);

	/* FIFO receive start register */
	writel(0x520, &fec->eth->r_fstart);

	/* size and address of each buffer */
	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);

#ifndef CONFIG_PHYLIB
	if (fec->xcv_type != SEVENWIRE)
		miiphy_restart_aneg(dev);
#endif
	fec_open(dev);
	return 0;
}

/**
 * Halt the FEC engine
 * @param[in] dev Our device to handle
 */
static void fec_halt(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	int counter = 0xffff;

	/*
	 * issue graceful stop command to the FEC transmitter if necessary
	 */
	writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
	       &fec->eth->x_cntrl);

	debug("eth_halt: wait for stop regs\n");
	/*
	 * wait for graceful stop to register
	 */
	while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
		udelay(1);

	/*
	 * Disable SmartDMA tasks
	 */
	fec_tx_task_disable(fec);
	fec_rx_task_disable(fec);

	/*
	 * Disable the Ethernet Controller
	 * Note: this will also reset the BD index counter!
	 */
	writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
	       &fec->eth->ecntrl);
	fec->rbd_index = 0;
	fec->tbd_index = 0;
	debug("eth_halt: done\n");
}

/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame. This routine only accepts
	 * 6-byte Ethernet addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the
	 * DMA engine. We also flush the packet to RAM here to avoid cache
	 * trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Below we read the DMA descriptor's last four bytes back from
	 * DRAM. This is important in order to make sure that all WRITE
	 * operations on the bus that were triggered by the previous cache
	 * FLUSH have completed.
	 *
	 * Otherwise, on MX28, it is possible to observe a corruption of the
	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
	 * for the bus structure of MX28. The scenario is as follows:
	 *
	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
	 *    to DRAM due to flush_dcache_range()
	 * 2) ARM core writes the FEC registers via AHB_ARB2
	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
	 *
	 * Note that 2) does sometimes finish before 1) due to reordering of
	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
	 * DMA descriptor is fully written into DRAM. This results in
	 * occasional corruption of the DMA descriptor.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent. On each turn of the wait cycle, we must
	 * invalidate data cache to see what's really in RAM. Also, we need
	 * a barrier here.
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The TDAR bit is cleared when the descriptors are all out from TX
	 * but on mx6solox we noticed that the READY bit is still not cleared
	 * right after TDAR.
	 * These are two distinct signals, and in IC simulation, we found that
	 * TDAR is always cleared before the READY bit of the last BD becomes
	 * cleared.
	 * The mx6solox uses a later version of the FEC IP, and it looks like
	 * this intrinsic behaviour of the TDAR bit has changed in the newer
	 * version.
	 *
	 * Fix this by polling the READY bit of the BD after the TDAR polling,
	 * which covers the mx6solox case and does not harm the other SoCs.
	 */
	timeout = FEC_XFER_TIMEOUT;
	while (--timeout) {
		invalidate_dcache_range(addr, addr + size);
		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
		    FEC_TBD_READY))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

out:
	debug("fec_send: status 0x%x index %d ret %i\n",
	      readw(&fec->tbd_base[fec->tbd_index].status),
	      fec->tbd_index, ret);
	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}

/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	struct nbuf *frame;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
		       &fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
			       &fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status. Before the status can be read, the data
	 * cache must be invalidated, because the data in RAM might have been
	 * changed by DMA. The descriptors are properly aligned to cachelines
	 * so there's no need to worry they'd overlap.
	 *
	 * WARNING: By invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one. Therefore we can NOT change
	 * the contents of this descriptor nor the surrounding ones. The
	 * problem is that in order to mark the descriptor as processed, we
	 * need to change the descriptor. The solution is to mark the whole
	 * cache line when all descriptors in the cache line are processed.
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
		    ((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			frame = (struct nbuf *)readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			addr = (uint32_t)frame;
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);

			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)frame->data, frame_length);
#endif
			memcpy(buff, frame->data, frame_length);
			net_process_received_packet(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08lx 0x%08x\n",
				       (ulong)rbd->data_pointer,
				       bd_status);
		}

		/*
		 * Free the current buffer, restart the engine and move
		 * forward to the next buffer. Here we check if the whole
		 * cacheline of descriptors was already processed and if so,
		 * we mark it free as a whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					      &fec->rbd_base[i]);
			}
			flush_dcache_range(addr,
					   addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}

static void fec_set_dev_name(char *dest, int dev_id)
{
	sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
}

static int fec_alloc_descs(struct fec_priv *fec)
{
	unsigned int size;
	int i;
	uint8_t *data;

	/* Allocate TX descriptors. */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
	if (!fec->tbd_base)
		goto err_tx;

	/* Allocate RX descriptors. */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
	if (!fec->rbd_base)
		goto err_rx;

	memset(fec->rbd_base, 0, size);

	/* Allocate RX buffers. */

	/* Maximum RX buffer size. */
	size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
	for (i = 0; i < FEC_RBD_NUM; i++) {
		data = memalign(FEC_DMA_RX_MINALIGN, size);
		if (!data) {
			printf("%s: error allocating rxbuf %d\n", __func__, i);
			goto err_ring;
		}

		memset(data, 0, size);

		fec->rbd_base[i].data_pointer = (uint32_t)data;
		fec->rbd_base[i].status = FEC_RBD_EMPTY;
		fec->rbd_base[i].data_length = 0;
		/* Flush the buffer to memory. */
		flush_dcache_range((uint32_t)data, (uint32_t)data + size);
	}

	/* Mark the last RBD to close the ring. */
	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;

	fec->rbd_index = 0;
	fec->tbd_index = 0;

	return 0;

err_ring:
	for (; i >= 0; i--)
		free((void *)fec->rbd_base[i].data_pointer);
	free(fec->rbd_base);
err_rx:
	free(fec->tbd_base);
err_tx:
	return -ENOMEM;
}

static void fec_free_descs(struct fec_priv *fec)
{
	int i;

	for (i = 0; i < FEC_RBD_NUM; i++)
		free((void *)fec->rbd_base[i].data_pointer);
	free(fec->rbd_base);
	free(fec->tbd_base);
}

#ifdef CONFIG_PHYLIB
int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, struct phy_device *phydev)
#else
static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, int phy_id)
#endif
{
	struct eth_device *edev;
	struct fec_priv *fec;
	unsigned char ethaddr[6];
	uint32_t start;
	int ret = 0;

	/* create and fill edev struct */
	edev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!edev) {
		puts("fec_mxc: not enough malloc memory for eth_device\n");
		ret = -ENOMEM;
		goto err1;
	}

	fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
	if (!fec) {
		puts("fec_mxc: not enough malloc memory for fec_priv\n");
		ret = -ENOMEM;
		goto err2;
	}

	memset(edev, 0, sizeof(*edev));
	memset(fec, 0, sizeof(*fec));

	ret = fec_alloc_descs(fec);
	if (ret)
		goto err3;

	edev->priv = fec;
	edev->init = fec_init;
	edev->send = fec_send;
	edev->recv = fec_recv;
	edev->halt = fec_halt;
	edev->write_hwaddr = fec_set_hwaddr;

	fec->eth = (struct ethernet_regs *)base_addr;
	fec->bd = bd;

	fec->xcv_type = CONFIG_FEC_XCV_TYPE;

	/* Reset chip. */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
	start = get_timer(0);
	while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("FEC MXC: Timeout resetting chip\n");
			goto err4;
		}
		udelay(10);
	}

	fec_reg_setup(fec);
	fec_set_dev_name(edev->name, dev_id);
	fec->dev_id = (dev_id == -1) ? 0 : dev_id;
	fec->bus = bus;
	fec_mii_setspeed(bus->priv);
#ifdef CONFIG_PHYLIB
	fec->phydev = phydev;
	phy_connect_dev(phydev, edev);
	/* Configure phy */
	phy_config(phydev);
#else
	fec->phy_id = phy_id;
#endif
	eth_register(edev);

	if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
		debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
		memcpy(edev->enetaddr, ethaddr, 6);
		if (!getenv("ethaddr"))
			eth_setenv_enetaddr("ethaddr", ethaddr);
	}
	return ret;
err4:
	fec_free_descs(fec);
err3:
	free(fec);
err2:
	free(edev);
err1:
	return ret;
}

struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
{
	struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
	struct mii_dev *bus;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return NULL;
	}
	bus->read = fec_phy_read;
	bus->write = fec_phy_write;
	bus->priv = eth;
	fec_set_dev_name(bus->name, dev_id);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return NULL;
	}
	fec_mii_setspeed(eth);
	return bus;
}

int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
{
	uint32_t base_mii;
	struct mii_dev *bus = NULL;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev = NULL;
#endif
	int ret;

#ifdef CONFIG_MX28
	/*
	 * The i.MX28 has two ethernet interfaces, but they are not equal.
	 * Only the first one can access the MDIO bus.
	 */
	base_mii = MXS_ENET0_BASE;
#else
	base_mii = addr;
#endif
	debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
	bus = fec_get_miibus(base_mii, dev_id);
	if (!bus)
		return -ENOMEM;
#ifdef CONFIG_PHYLIB
	phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		free(bus);
		return -ENOMEM;
	}
	ret = fec_probe(bd, dev_id, addr, bus, phydev);
#else
	ret = fec_probe(bd, dev_id, addr, bus, phy_id);
#endif
	if (ret) {
#ifdef CONFIG_PHYLIB
		free(phydev);
#endif
		free(bus);
	}
	return ret;
}

#ifdef CONFIG_FEC_MXC_PHYADDR
int fecmxc_initialize(bd_t *bd)
{
	return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
			IMX_FEC_BASE);
}
#endif

#ifndef CONFIG_PHYLIB
int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	fec->mii_postcall = cb;
	return 0;
}
#endif