1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com> 4 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org> 5 * (C) Copyright 2008 Armadeus Systems nc 6 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de> 7 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de> 8 */ 9 10 #include <common.h> 11 #include <dm.h> 12 #include <environment.h> 13 #include <malloc.h> 14 #include <memalign.h> 15 #include <miiphy.h> 16 #include <net.h> 17 #include <netdev.h> 18 #include <power/regulator.h> 19 20 #include <asm/io.h> 21 #include <linux/errno.h> 22 #include <linux/compiler.h> 23 24 #include <asm/arch/clock.h> 25 #include <asm/arch/imx-regs.h> 26 #include <asm/mach-imx/sys_proto.h> 27 #include <asm-generic/gpio.h> 28 29 #include "fec_mxc.h" 30 31 DECLARE_GLOBAL_DATA_PTR; 32 33 /* 34 * Timeout the transfer after 5 mS. This is usually a bit more, since 35 * the code in the tightloops this timeout is used in adds some overhead. 36 */ 37 #define FEC_XFER_TIMEOUT 5000 38 39 /* 40 * The standard 32-byte DMA alignment does not work on mx6solox, which requires 41 * 64-byte alignment in the DMA RX FEC buffer. 42 * Introduce the FEC_DMA_RX_MINALIGN which can cover mx6solox needs and also 43 * satisfies the alignment on other SoCs (32-bytes) 44 */ 45 #define FEC_DMA_RX_MINALIGN 64 46 47 #ifndef CONFIG_MII 48 #error "CONFIG_MII has to be defined!" 49 #endif 50 51 #ifndef CONFIG_FEC_XCV_TYPE 52 #define CONFIG_FEC_XCV_TYPE MII100 53 #endif 54 55 /* 56 * The i.MX28 operates with packets in big endian. We need to swap them before 57 * sending and after receiving. 58 */ 59 #ifdef CONFIG_MX28 60 #define CONFIG_FEC_MXC_SWAP_PACKET 61 #endif 62 63 #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd)) 64 65 /* Check various alignment issues at compile time */ 66 #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0)) 67 #error "ARCH_DMA_MINALIGN must be multiple of 16!" 68 #endif 69 70 #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \ 71 (PKTALIGN % ARCH_DMA_MINALIGN != 0)) 72 #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!" 73 #endif 74 75 #undef DEBUG 76 77 #ifdef CONFIG_FEC_MXC_SWAP_PACKET 78 static void swap_packet(uint32_t *packet, int length) 79 { 80 int i; 81 82 for (i = 0; i < DIV_ROUND_UP(length, 4); i++) 83 packet[i] = __swab32(packet[i]); 84 } 85 #endif 86 87 /* MII-interface related functions */ 88 static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyaddr, 89 uint8_t regaddr) 90 { 91 uint32_t reg; /* convenient holder for the PHY register */ 92 uint32_t phy; /* convenient holder for the PHY */ 93 uint32_t start; 94 int val; 95 96 /* 97 * reading from any PHY's register is done by properly 98 * programming the FEC's MII data register. 99 */ 100 writel(FEC_IEVENT_MII, ð->ievent); 101 reg = regaddr << FEC_MII_DATA_RA_SHIFT; 102 phy = phyaddr << FEC_MII_DATA_PA_SHIFT; 103 104 writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA | 105 phy | reg, ð->mii_data); 106 107 /* wait for the related interrupt */ 108 start = get_timer(0); 109 while (!(readl(ð->ievent) & FEC_IEVENT_MII)) { 110 if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) { 111 printf("Read MDIO failed...\n"); 112 return -1; 113 } 114 } 115 116 /* clear mii interrupt bit */ 117 writel(FEC_IEVENT_MII, ð->ievent); 118 119 /* it's now safe to read the PHY's register */ 120 val = (unsigned short)readl(ð->mii_data); 121 debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyaddr, 122 regaddr, val); 123 return val; 124 } 125 126 static int fec_get_clk_rate(void *udev, int idx) 127 { 128 #if IS_ENABLED(CONFIG_IMX8) 129 struct fec_priv *fec; 130 struct udevice *dev; 131 int ret; 132 133 dev = udev; 134 if (!dev) { 135 ret = uclass_get_device(UCLASS_ETH, idx, &dev); 136 if (ret < 0) { 137 debug("Can't get FEC udev: %d\n", ret); 138 return ret; 139 } 140 } 141 142 fec = dev_get_priv(dev); 143 if (fec) 144 return fec->clk_rate; 145 146 return -EINVAL; 147 #else 148 return imx_get_fecclk(); 149 #endif 150 } 151 152 static void fec_mii_setspeed(struct ethernet_regs *eth) 153 { 154 /* 155 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock 156 * and do not drop the Preamble. 157 * 158 * The i.MX28 and i.MX6 types have another field in the MSCR (aka 159 * MII_SPEED) register that defines the MDIO output hold time. Earlier 160 * versions are RAZ there, so just ignore the difference and write the 161 * register always. 162 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns. 163 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 164 * output. 165 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 166 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 167 * holdtime cannot result in a value greater than 3. 168 */ 169 u32 pclk; 170 u32 speed; 171 u32 hold; 172 int ret; 173 174 ret = fec_get_clk_rate(NULL, 0); 175 if (ret < 0) { 176 printf("Can't find FEC0 clk rate: %d\n", ret); 177 return; 178 } 179 pclk = ret; 180 speed = DIV_ROUND_UP(pclk, 5000000); 181 hold = DIV_ROUND_UP(pclk, 100000000) - 1; 182 183 #ifdef FEC_QUIRK_ENET_MAC 184 speed--; 185 #endif 186 writel(speed << 1 | hold << 8, ð->mii_speed); 187 debug("%s: mii_speed %08x\n", __func__, readl(ð->mii_speed)); 188 } 189 190 static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyaddr, 191 uint8_t regaddr, uint16_t data) 192 { 193 uint32_t reg; /* convenient holder for the PHY register */ 194 uint32_t phy; /* convenient holder for the PHY */ 195 uint32_t start; 196 197 reg = regaddr << FEC_MII_DATA_RA_SHIFT; 198 phy = phyaddr << FEC_MII_DATA_PA_SHIFT; 199 200 writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | 201 FEC_MII_DATA_TA | phy | reg | data, ð->mii_data); 202 203 /* wait for the MII interrupt */ 204 start = get_timer(0); 205 while (!(readl(ð->ievent) & FEC_IEVENT_MII)) { 206 if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) { 207 printf("Write MDIO failed...\n"); 208 return -1; 209 } 210 } 211 212 /* clear MII interrupt bit */ 213 writel(FEC_IEVENT_MII, ð->ievent); 214 debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyaddr, 215 regaddr, data); 216 217 return 0; 218 } 219 220 static int fec_phy_read(struct mii_dev *bus, int phyaddr, int dev_addr, 221 int regaddr) 222 { 223 return fec_mdio_read(bus->priv, phyaddr, regaddr); 224 } 225 226 static int fec_phy_write(struct mii_dev *bus, int phyaddr, int dev_addr, 227 int regaddr, u16 data) 228 { 229 return fec_mdio_write(bus->priv, phyaddr, regaddr, data); 230 } 231 232 #ifndef CONFIG_PHYLIB 233 static int miiphy_restart_aneg(struct eth_device *dev) 234 { 235 int ret = 0; 236 #if !defined(CONFIG_FEC_MXC_NO_ANEG) 237 struct fec_priv *fec = (struct fec_priv *)dev->priv; 238 struct ethernet_regs *eth = fec->bus->priv; 239 240 /* 241 * Wake up from sleep if necessary 242 * Reset PHY, then delay 300ns 243 */ 244 #ifdef CONFIG_MX27 245 fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF); 246 #endif 247 fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET); 248 udelay(1000); 249 250 /* Set the auto-negotiation advertisement register bits */ 251 fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE, 252 LPA_100FULL | LPA_100HALF | LPA_10FULL | 253 LPA_10HALF | PHY_ANLPAR_PSB_802_3); 254 fec_mdio_write(eth, fec->phy_id, MII_BMCR, 255 BMCR_ANENABLE | BMCR_ANRESTART); 256 257 if (fec->mii_postcall) 258 ret = fec->mii_postcall(fec->phy_id); 259 260 #endif 261 return ret; 262 } 263 264 #ifndef CONFIG_FEC_FIXED_SPEED 265 static int miiphy_wait_aneg(struct eth_device *dev) 266 { 267 uint32_t start; 268 int status; 269 struct fec_priv *fec = (struct fec_priv *)dev->priv; 270 struct ethernet_regs *eth = fec->bus->priv; 271 272 /* Wait for AN completion */ 273 start = get_timer(0); 274 do { 275 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) { 276 printf("%s: Autonegotiation timeout\n", dev->name); 277 return -1; 278 } 279 280 status = fec_mdio_read(eth, fec->phy_id, MII_BMSR); 281 if (status < 0) { 282 printf("%s: Autonegotiation failed. status: %d\n", 283 dev->name, status); 284 return -1; 285 } 286 } while (!(status & BMSR_LSTATUS)); 287 288 return 0; 289 } 290 #endif /* CONFIG_FEC_FIXED_SPEED */ 291 #endif 292 293 static int fec_rx_task_enable(struct fec_priv *fec) 294 { 295 writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active); 296 return 0; 297 } 298 299 static int fec_rx_task_disable(struct fec_priv *fec) 300 { 301 return 0; 302 } 303 304 static int fec_tx_task_enable(struct fec_priv *fec) 305 { 306 writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active); 307 return 0; 308 } 309 310 static int fec_tx_task_disable(struct fec_priv *fec) 311 { 312 return 0; 313 } 314 315 /** 316 * Initialize receive task's buffer descriptors 317 * @param[in] fec all we know about the device yet 318 * @param[in] count receive buffer count to be allocated 319 * @param[in] dsize desired size of each receive buffer 320 * @return 0 on success 321 * 322 * Init all RX descriptors to default values. 323 */ 324 static void fec_rbd_init(struct fec_priv *fec, int count, int dsize) 325 { 326 uint32_t size; 327 ulong data; 328 int i; 329 330 /* 331 * Reload the RX descriptors with default values and wipe 332 * the RX buffers. 333 */ 334 size = roundup(dsize, ARCH_DMA_MINALIGN); 335 for (i = 0; i < count; i++) { 336 data = fec->rbd_base[i].data_pointer; 337 memset((void *)data, 0, dsize); 338 flush_dcache_range(data, data + size); 339 340 fec->rbd_base[i].status = FEC_RBD_EMPTY; 341 fec->rbd_base[i].data_length = 0; 342 } 343 344 /* Mark the last RBD to close the ring. */ 345 fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY; 346 fec->rbd_index = 0; 347 348 flush_dcache_range((ulong)fec->rbd_base, 349 (ulong)fec->rbd_base + size); 350 } 351 352 /** 353 * Initialize transmit task's buffer descriptors 354 * @param[in] fec all we know about the device yet 355 * 356 * Transmit buffers are created externally. We only have to init the BDs here.\n 357 * Note: There is a race condition in the hardware. When only one BD is in 358 * use it must be marked with the WRAP bit to use it for every transmitt. 359 * This bit in combination with the READY bit results into double transmit 360 * of each data buffer. It seems the state machine checks READY earlier then 361 * resetting it after the first transfer. 362 * Using two BDs solves this issue. 363 */ 364 static void fec_tbd_init(struct fec_priv *fec) 365 { 366 ulong addr = (ulong)fec->tbd_base; 367 unsigned size = roundup(2 * sizeof(struct fec_bd), 368 ARCH_DMA_MINALIGN); 369 370 memset(fec->tbd_base, 0, size); 371 fec->tbd_base[0].status = 0; 372 fec->tbd_base[1].status = FEC_TBD_WRAP; 373 fec->tbd_index = 0; 374 flush_dcache_range(addr, addr + size); 375 } 376 377 /** 378 * Mark the given read buffer descriptor as free 379 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0 380 * @param[in] prbd buffer descriptor to mark free again 381 */ 382 static void fec_rbd_clean(int last, struct fec_bd *prbd) 383 { 384 unsigned short flags = FEC_RBD_EMPTY; 385 if (last) 386 flags |= FEC_RBD_WRAP; 387 writew(flags, &prbd->status); 388 writew(0, &prbd->data_length); 389 } 390 391 static int fec_get_hwaddr(int dev_id, unsigned char *mac) 392 { 393 imx_get_mac_from_fuse(dev_id, mac); 394 return !is_valid_ethaddr(mac); 395 } 396 397 #ifdef CONFIG_DM_ETH 398 static int fecmxc_set_hwaddr(struct udevice *dev) 399 #else 400 static int fec_set_hwaddr(struct eth_device *dev) 401 #endif 402 { 403 #ifdef CONFIG_DM_ETH 404 struct fec_priv *fec = dev_get_priv(dev); 405 struct eth_pdata *pdata = dev_get_platdata(dev); 406 uchar *mac = pdata->enetaddr; 407 #else 408 uchar *mac = dev->enetaddr; 409 struct fec_priv *fec = (struct fec_priv *)dev->priv; 410 #endif 411 412 writel(0, &fec->eth->iaddr1); 413 writel(0, &fec->eth->iaddr2); 414 writel(0, &fec->eth->gaddr1); 415 writel(0, &fec->eth->gaddr2); 416 417 /* Set physical address */ 418 writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3], 419 &fec->eth->paddr1); 420 writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2); 421 422 return 0; 423 } 424 425 /* Do initial configuration of the FEC registers */ 426 static void fec_reg_setup(struct fec_priv *fec) 427 { 428 uint32_t rcntrl; 429 430 /* Set interrupt mask register */ 431 writel(0x00000000, &fec->eth->imask); 432 433 /* Clear FEC-Lite interrupt event register(IEVENT) */ 434 writel(0xffffffff, &fec->eth->ievent); 435 436 /* Set FEC-Lite receive control register(R_CNTRL): */ 437 438 /* Start with frame length = 1518, common for all modes. */ 439 rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT; 440 if (fec->xcv_type != SEVENWIRE) /* xMII modes */ 441 rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE; 442 if (fec->xcv_type == RGMII) 443 rcntrl |= FEC_RCNTRL_RGMII; 444 else if (fec->xcv_type == RMII) 445 rcntrl |= FEC_RCNTRL_RMII; 446 447 writel(rcntrl, &fec->eth->r_cntrl); 448 } 449 450 /** 451 * Start the FEC engine 452 * @param[in] dev Our device to handle 453 */ 454 #ifdef CONFIG_DM_ETH 455 static int fec_open(struct udevice *dev) 456 #else 457 static int fec_open(struct eth_device *edev) 458 #endif 459 { 460 #ifdef CONFIG_DM_ETH 461 struct fec_priv *fec = dev_get_priv(dev); 462 #else 463 struct fec_priv *fec = (struct fec_priv *)edev->priv; 464 #endif 465 int speed; 466 ulong addr, size; 467 int i; 468 469 debug("fec_open: fec_open(dev)\n"); 470 /* full-duplex, heartbeat disabled */ 471 writel(1 << 2, &fec->eth->x_cntrl); 472 fec->rbd_index = 0; 473 474 /* Invalidate all descriptors */ 475 for (i = 0; i < FEC_RBD_NUM - 1; i++) 476 fec_rbd_clean(0, &fec->rbd_base[i]); 477 fec_rbd_clean(1, &fec->rbd_base[i]); 478 479 /* Flush the descriptors into RAM */ 480 size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), 481 ARCH_DMA_MINALIGN); 482 addr = (ulong)fec->rbd_base; 483 flush_dcache_range(addr, addr + size); 484 485 #ifdef FEC_QUIRK_ENET_MAC 486 /* Enable ENET HW endian SWAP */ 487 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP, 488 &fec->eth->ecntrl); 489 /* Enable ENET store and forward mode */ 490 writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD, 491 &fec->eth->x_wmrk); 492 #endif 493 /* Enable FEC-Lite controller */ 494 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN, 495 &fec->eth->ecntrl); 496 497 #if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL) 498 udelay(100); 499 500 /* setup the MII gasket for RMII mode */ 501 /* disable the gasket */ 502 writew(0, &fec->eth->miigsk_enr); 503 504 /* wait for the gasket to be disabled */ 505 while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) 506 udelay(2); 507 508 /* configure gasket for RMII, 50 MHz, no loopback, and no echo */ 509 writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr); 510 511 /* re-enable the gasket */ 512 writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr); 513 514 /* wait until MII gasket is ready */ 515 int max_loops = 10; 516 while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) { 517 if (--max_loops <= 0) { 518 printf("WAIT for MII Gasket ready timed out\n"); 519 break; 520 } 521 } 522 #endif 523 524 #ifdef CONFIG_PHYLIB 525 { 526 /* Start up the PHY */ 527 int ret = phy_startup(fec->phydev); 528 529 if (ret) { 530 printf("Could not initialize PHY %s\n", 531 fec->phydev->dev->name); 532 return ret; 533 } 534 speed = fec->phydev->speed; 535 } 536 #elif CONFIG_FEC_FIXED_SPEED 537 speed = CONFIG_FEC_FIXED_SPEED; 538 #else 539 miiphy_wait_aneg(edev); 540 speed = miiphy_speed(edev->name, fec->phy_id); 541 miiphy_duplex(edev->name, fec->phy_id); 542 #endif 543 544 #ifdef FEC_QUIRK_ENET_MAC 545 { 546 u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED; 547 u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T; 548 if (speed == _1000BASET) 549 ecr |= FEC_ECNTRL_SPEED; 550 else if (speed != _100BASET) 551 rcr |= FEC_RCNTRL_RMII_10T; 552 writel(ecr, &fec->eth->ecntrl); 553 writel(rcr, &fec->eth->r_cntrl); 554 } 555 #endif 556 debug("%s:Speed=%i\n", __func__, speed); 557 558 /* Enable SmartDMA receive task */ 559 fec_rx_task_enable(fec); 560 561 udelay(100000); 562 return 0; 563 } 564 565 #ifdef CONFIG_DM_ETH 566 static int fecmxc_init(struct udevice *dev) 567 #else 568 static int fec_init(struct eth_device *dev, bd_t *bd) 569 #endif 570 { 571 #ifdef CONFIG_DM_ETH 572 struct fec_priv *fec = dev_get_priv(dev); 573 #else 574 struct fec_priv *fec = (struct fec_priv *)dev->priv; 575 #endif 576 u8 *mib_ptr = (uint8_t *)&fec->eth->rmon_t_drop; 577 u8 *i; 578 ulong addr; 579 580 /* Initialize MAC address */ 581 #ifdef CONFIG_DM_ETH 582 fecmxc_set_hwaddr(dev); 583 #else 584 fec_set_hwaddr(dev); 585 #endif 586 587 /* Setup transmit descriptors, there are two in total. */ 588 fec_tbd_init(fec); 589 590 /* Setup receive descriptors. */ 591 fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE); 592 593 fec_reg_setup(fec); 594 595 if (fec->xcv_type != SEVENWIRE) 596 fec_mii_setspeed(fec->bus->priv); 597 598 /* Set Opcode/Pause Duration Register */ 599 writel(0x00010020, &fec->eth->op_pause); /* FIXME 0xffff0020; */ 600 writel(0x2, &fec->eth->x_wmrk); 601 602 /* Set multicast address filter */ 603 writel(0x00000000, &fec->eth->gaddr1); 604 writel(0x00000000, &fec->eth->gaddr2); 605 606 /* Do not access reserved register */ 607 if (!is_mx6ul() && !is_mx6ull() && !is_imx8m()) { 608 /* clear MIB RAM */ 609 for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4) 610 writel(0, i); 611 612 /* FIFO receive start register */ 613 writel(0x520, &fec->eth->r_fstart); 614 } 615 616 /* size and address of each buffer */ 617 writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr); 618 619 addr = (ulong)fec->tbd_base; 620 writel((uint32_t)addr, &fec->eth->etdsr); 621 622 addr = (ulong)fec->rbd_base; 623 writel((uint32_t)addr, &fec->eth->erdsr); 624 625 #ifndef CONFIG_PHYLIB 626 if (fec->xcv_type != SEVENWIRE) 627 miiphy_restart_aneg(dev); 628 #endif 629 fec_open(dev); 630 return 0; 631 } 632 633 /** 634 * Halt the FEC engine 635 * @param[in] dev Our device to handle 636 */ 637 #ifdef CONFIG_DM_ETH 638 static void fecmxc_halt(struct udevice *dev) 639 #else 640 static void fec_halt(struct eth_device *dev) 641 #endif 642 { 643 #ifdef CONFIG_DM_ETH 644 struct fec_priv *fec = dev_get_priv(dev); 645 #else 646 struct fec_priv *fec = (struct fec_priv *)dev->priv; 647 #endif 648 int counter = 0xffff; 649 650 /* issue graceful stop command to the FEC transmitter if necessary */ 651 writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl), 652 &fec->eth->x_cntrl); 653 654 debug("eth_halt: wait for stop regs\n"); 655 /* wait for graceful stop to register */ 656 while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA))) 657 udelay(1); 658 659 /* Disable SmartDMA tasks */ 660 fec_tx_task_disable(fec); 661 fec_rx_task_disable(fec); 662 663 /* 664 * Disable the Ethernet Controller 665 * Note: this will also reset the BD index counter! 666 */ 667 writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN, 668 &fec->eth->ecntrl); 669 fec->rbd_index = 0; 670 fec->tbd_index = 0; 671 debug("eth_halt: done\n"); 672 } 673 674 /** 675 * Transmit one frame 676 * @param[in] dev Our ethernet device to handle 677 * @param[in] packet Pointer to the data to be transmitted 678 * @param[in] length Data count in bytes 679 * @return 0 on success 680 */ 681 #ifdef CONFIG_DM_ETH 682 static int fecmxc_send(struct udevice *dev, void *packet, int length) 683 #else 684 static int fec_send(struct eth_device *dev, void *packet, int length) 685 #endif 686 { 687 unsigned int status; 688 u32 size; 689 ulong addr, end; 690 int timeout = FEC_XFER_TIMEOUT; 691 int ret = 0; 692 693 /* 694 * This routine transmits one frame. This routine only accepts 695 * 6-byte Ethernet addresses. 696 */ 697 #ifdef CONFIG_DM_ETH 698 struct fec_priv *fec = dev_get_priv(dev); 699 #else 700 struct fec_priv *fec = (struct fec_priv *)dev->priv; 701 #endif 702 703 /* 704 * Check for valid length of data. 705 */ 706 if ((length > 1500) || (length <= 0)) { 707 printf("Payload (%d) too large\n", length); 708 return -1; 709 } 710 711 /* 712 * Setup the transmit buffer. We are always using the first buffer for 713 * transmission, the second will be empty and only used to stop the DMA 714 * engine. We also flush the packet to RAM here to avoid cache trouble. 715 */ 716 #ifdef CONFIG_FEC_MXC_SWAP_PACKET 717 swap_packet((uint32_t *)packet, length); 718 #endif 719 720 addr = (ulong)packet; 721 end = roundup(addr + length, ARCH_DMA_MINALIGN); 722 addr &= ~(ARCH_DMA_MINALIGN - 1); 723 flush_dcache_range(addr, end); 724 725 writew(length, &fec->tbd_base[fec->tbd_index].data_length); 726 writel((uint32_t)addr, &fec->tbd_base[fec->tbd_index].data_pointer); 727 728 /* 729 * update BD's status now 730 * This block: 731 * - is always the last in a chain (means no chain) 732 * - should transmitt the CRC 733 * - might be the last BD in the list, so the address counter should 734 * wrap (-> keep the WRAP flag) 735 */ 736 status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP; 737 status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY; 738 writew(status, &fec->tbd_base[fec->tbd_index].status); 739 740 /* 741 * Flush data cache. This code flushes both TX descriptors to RAM. 742 * After this code, the descriptors will be safely in RAM and we 743 * can start DMA. 744 */ 745 size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN); 746 addr = (ulong)fec->tbd_base; 747 flush_dcache_range(addr, addr + size); 748 749 /* 750 * Below we read the DMA descriptor's last four bytes back from the 751 * DRAM. This is important in order to make sure that all WRITE 752 * operations on the bus that were triggered by previous cache FLUSH 753 * have completed. 754 * 755 * Otherwise, on MX28, it is possible to observe a corruption of the 756 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM 757 * for the bus structure of MX28. The scenario is as follows: 758 * 759 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going 760 * to DRAM due to flush_dcache_range() 761 * 2) ARM core writes the FEC registers via AHB_ARB2 762 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3 763 * 764 * Note that 2) does sometimes finish before 1) due to reordering of 765 * WRITE accesses on the AHB bus, therefore triggering 3) before the 766 * DMA descriptor is fully written into DRAM. This results in occasional 767 * corruption of the DMA descriptor. 768 */ 769 readl(addr + size - 4); 770 771 /* Enable SmartDMA transmit task */ 772 fec_tx_task_enable(fec); 773 774 /* 775 * Wait until frame is sent. On each turn of the wait cycle, we must 776 * invalidate data cache to see what's really in RAM. Also, we need 777 * barrier here. 778 */ 779 while (--timeout) { 780 if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR)) 781 break; 782 } 783 784 if (!timeout) { 785 ret = -EINVAL; 786 goto out; 787 } 788 789 /* 790 * The TDAR bit is cleared when the descriptors are all out from TX 791 * but on mx6solox we noticed that the READY bit is still not cleared 792 * right after TDAR. 793 * These are two distinct signals, and in IC simulation, we found that 794 * TDAR always gets cleared prior than the READY bit of last BD becomes 795 * cleared. 796 * In mx6solox, we use a later version of FEC IP. It looks like that 797 * this intrinsic behaviour of TDAR bit has changed in this newer FEC 798 * version. 799 * 800 * Fix this by polling the READY bit of BD after the TDAR polling, 801 * which covers the mx6solox case and does not harm the other SoCs. 802 */ 803 timeout = FEC_XFER_TIMEOUT; 804 while (--timeout) { 805 invalidate_dcache_range(addr, addr + size); 806 if (!(readw(&fec->tbd_base[fec->tbd_index].status) & 807 FEC_TBD_READY)) 808 break; 809 } 810 811 if (!timeout) 812 ret = -EINVAL; 813 814 out: 815 debug("fec_send: status 0x%x index %d ret %i\n", 816 readw(&fec->tbd_base[fec->tbd_index].status), 817 fec->tbd_index, ret); 818 /* for next transmission use the other buffer */ 819 if (fec->tbd_index) 820 fec->tbd_index = 0; 821 else 822 fec->tbd_index = 1; 823 824 return ret; 825 } 826 827 /** 828 * Pull one frame from the card 829 * @param[in] dev Our ethernet device to handle 830 * @return Length of packet read 831 */ 832 #ifdef CONFIG_DM_ETH 833 static int fecmxc_recv(struct udevice *dev, int flags, uchar **packetp) 834 #else 835 static int fec_recv(struct eth_device *dev) 836 #endif 837 { 838 #ifdef CONFIG_DM_ETH 839 struct fec_priv *fec = dev_get_priv(dev); 840 #else 841 struct fec_priv *fec = (struct fec_priv *)dev->priv; 842 #endif 843 struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index]; 844 unsigned long ievent; 845 int frame_length, len = 0; 846 uint16_t bd_status; 847 ulong addr, size, end; 848 int i; 849 850 #ifdef CONFIG_DM_ETH 851 *packetp = memalign(ARCH_DMA_MINALIGN, FEC_MAX_PKT_SIZE); 852 if (*packetp == 0) { 853 printf("%s: error allocating packetp\n", __func__); 854 return -ENOMEM; 855 } 856 #else 857 ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE); 858 #endif 859 860 /* Check if any critical events have happened */ 861 ievent = readl(&fec->eth->ievent); 862 writel(ievent, &fec->eth->ievent); 863 debug("fec_recv: ievent 0x%lx\n", ievent); 864 if (ievent & FEC_IEVENT_BABR) { 865 #ifdef CONFIG_DM_ETH 866 fecmxc_halt(dev); 867 fecmxc_init(dev); 868 #else 869 fec_halt(dev); 870 fec_init(dev, fec->bd); 871 #endif 872 printf("some error: 0x%08lx\n", ievent); 873 return 0; 874 } 875 if (ievent & FEC_IEVENT_HBERR) { 876 /* Heartbeat error */ 877 writel(0x00000001 | readl(&fec->eth->x_cntrl), 878 &fec->eth->x_cntrl); 879 } 880 if (ievent & FEC_IEVENT_GRA) { 881 /* Graceful stop complete */ 882 if (readl(&fec->eth->x_cntrl) & 0x00000001) { 883 #ifdef CONFIG_DM_ETH 884 fecmxc_halt(dev); 885 #else 886 fec_halt(dev); 887 #endif 888 writel(~0x00000001 & readl(&fec->eth->x_cntrl), 889 &fec->eth->x_cntrl); 890 #ifdef CONFIG_DM_ETH 891 fecmxc_init(dev); 892 #else 893 fec_init(dev, fec->bd); 894 #endif 895 } 896 } 897 898 /* 899 * Read the buffer status. Before the status can be read, the data cache 900 * must be invalidated, because the data in RAM might have been changed 901 * by DMA. The descriptors are properly aligned to cachelines so there's 902 * no need to worry they'd overlap. 903 * 904 * WARNING: By invalidating the descriptor here, we also invalidate 905 * the descriptors surrounding this one. Therefore we can NOT change the 906 * contents of this descriptor nor the surrounding ones. The problem is 907 * that in order to mark the descriptor as processed, we need to change 908 * the descriptor. The solution is to mark the whole cache line when all 909 * descriptors in the cache line are processed. 910 */ 911 addr = (ulong)rbd; 912 addr &= ~(ARCH_DMA_MINALIGN - 1); 913 size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN); 914 invalidate_dcache_range(addr, addr + size); 915 916 bd_status = readw(&rbd->status); 917 debug("fec_recv: status 0x%x\n", bd_status); 918 919 if (!(bd_status & FEC_RBD_EMPTY)) { 920 if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) && 921 ((readw(&rbd->data_length) - 4) > 14)) { 922 /* Get buffer address and size */ 923 addr = readl(&rbd->data_pointer); 924 frame_length = readw(&rbd->data_length) - 4; 925 /* Invalidate data cache over the buffer */ 926 end = roundup(addr + frame_length, ARCH_DMA_MINALIGN); 927 addr &= ~(ARCH_DMA_MINALIGN - 1); 928 invalidate_dcache_range(addr, end); 929 930 /* Fill the buffer and pass it to upper layers */ 931 #ifdef CONFIG_FEC_MXC_SWAP_PACKET 932 swap_packet((uint32_t *)addr, frame_length); 933 #endif 934 935 #ifdef CONFIG_DM_ETH 936 memcpy(*packetp, (char *)addr, frame_length); 937 #else 938 memcpy(buff, (char *)addr, frame_length); 939 net_process_received_packet(buff, frame_length); 940 #endif 941 len = frame_length; 942 } else { 943 if (bd_status & FEC_RBD_ERR) 944 debug("error frame: 0x%08lx 0x%08x\n", 945 addr, bd_status); 946 } 947 948 /* 949 * Free the current buffer, restart the engine and move forward 950 * to the next buffer. Here we check if the whole cacheline of 951 * descriptors was already processed and if so, we mark it free 952 * as whole. 953 */ 954 size = RXDESC_PER_CACHELINE - 1; 955 if ((fec->rbd_index & size) == size) { 956 i = fec->rbd_index - size; 957 addr = (ulong)&fec->rbd_base[i]; 958 for (; i <= fec->rbd_index ; i++) { 959 fec_rbd_clean(i == (FEC_RBD_NUM - 1), 960 &fec->rbd_base[i]); 961 } 962 flush_dcache_range(addr, 963 addr + ARCH_DMA_MINALIGN); 964 } 965 966 fec_rx_task_enable(fec); 967 fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM; 968 } 969 debug("fec_recv: stop\n"); 970 971 return len; 972 } 973 974 static void fec_set_dev_name(char *dest, int dev_id) 975 { 976 sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id); 977 } 978 979 static int fec_alloc_descs(struct fec_priv *fec) 980 { 981 unsigned int size; 982 int i; 983 uint8_t *data; 984 ulong addr; 985 986 /* Allocate TX descriptors. */ 987 size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN); 988 fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size); 989 if (!fec->tbd_base) 990 goto err_tx; 991 992 /* Allocate RX descriptors. */ 993 size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN); 994 fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size); 995 if (!fec->rbd_base) 996 goto err_rx; 997 998 memset(fec->rbd_base, 0, size); 999 1000 /* Allocate RX buffers. */ 1001 1002 /* Maximum RX buffer size. */ 1003 size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN); 1004 for (i = 0; i < FEC_RBD_NUM; i++) { 1005 data = memalign(FEC_DMA_RX_MINALIGN, size); 1006 if (!data) { 1007 printf("%s: error allocating rxbuf %d\n", __func__, i); 1008 goto err_ring; 1009 } 1010 1011 memset(data, 0, size); 1012 1013 addr = (ulong)data; 1014 fec->rbd_base[i].data_pointer = (uint32_t)addr; 1015 fec->rbd_base[i].status = FEC_RBD_EMPTY; 1016 fec->rbd_base[i].data_length = 0; 1017 /* Flush the buffer to memory. */ 1018 flush_dcache_range(addr, addr + size); 1019 } 1020 1021 /* Mark the last RBD to close the ring. */ 1022 fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY; 1023 1024 fec->rbd_index = 0; 1025 fec->tbd_index = 0; 1026 1027 return 0; 1028 1029 err_ring: 1030 for (; i >= 0; i--) { 1031 addr = fec->rbd_base[i].data_pointer; 1032 free((void *)addr); 1033 } 1034 free(fec->rbd_base); 1035 err_rx: 1036 free(fec->tbd_base); 1037 err_tx: 1038 return -ENOMEM; 1039 } 1040 1041 static void fec_free_descs(struct fec_priv *fec) 1042 { 1043 int i; 1044 ulong addr; 1045 1046 for (i = 0; i < FEC_RBD_NUM; i++) { 1047 addr = fec->rbd_base[i].data_pointer; 1048 free((void *)addr); 1049 } 1050 free(fec->rbd_base); 1051 free(fec->tbd_base); 1052 } 1053 1054 struct mii_dev *fec_get_miibus(ulong base_addr, int dev_id) 1055 { 1056 struct ethernet_regs *eth = (struct ethernet_regs *)base_addr; 1057 struct mii_dev *bus; 1058 int ret; 1059 1060 bus = mdio_alloc(); 1061 if (!bus) { 1062 printf("mdio_alloc failed\n"); 1063 return NULL; 1064 } 1065 bus->read = fec_phy_read; 1066 bus->write = fec_phy_write; 1067 bus->priv = eth; 1068 fec_set_dev_name(bus->name, dev_id); 1069 1070 ret = mdio_register(bus); 1071 if (ret) { 1072 printf("mdio_register failed\n"); 1073 free(bus); 1074 return NULL; 1075 } 1076 fec_mii_setspeed(eth); 1077 return bus; 1078 } 1079 1080 #ifndef CONFIG_DM_ETH 1081 #ifdef CONFIG_PHYLIB 1082 int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr, 1083 struct mii_dev *bus, struct phy_device *phydev) 1084 #else 1085 static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr, 1086 struct mii_dev *bus, int phy_id) 1087 #endif 1088 { 1089 struct eth_device *edev; 1090 struct fec_priv *fec; 1091 unsigned char ethaddr[6]; 1092 char mac[16]; 1093 uint32_t start; 1094 int ret = 0; 1095 1096 /* create and fill edev struct */ 1097 edev = (struct eth_device *)malloc(sizeof(struct eth_device)); 1098 if (!edev) { 1099 puts("fec_mxc: not enough malloc memory for eth_device\n"); 1100 ret = -ENOMEM; 1101 goto err1; 1102 } 1103 1104 fec = (struct fec_priv *)malloc(sizeof(struct fec_priv)); 1105 if (!fec) { 1106 puts("fec_mxc: not enough malloc memory for fec_priv\n"); 1107 ret = -ENOMEM; 1108 goto err2; 1109 } 1110 1111 memset(edev, 0, sizeof(*edev)); 1112 memset(fec, 0, sizeof(*fec)); 1113 1114 ret = fec_alloc_descs(fec); 1115 if (ret) 1116 goto err3; 1117 1118 edev->priv = fec; 1119 edev->init = fec_init; 1120 edev->send = fec_send; 1121 edev->recv = fec_recv; 1122 edev->halt = fec_halt; 1123 edev->write_hwaddr = fec_set_hwaddr; 1124 1125 fec->eth = (struct ethernet_regs *)(ulong)base_addr; 1126 fec->bd = bd; 1127 1128 fec->xcv_type = CONFIG_FEC_XCV_TYPE; 1129 1130 /* Reset chip. */ 1131 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl); 1132 start = get_timer(0); 1133 while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) { 1134 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) { 1135 printf("FEC MXC: Timeout resetting chip\n"); 1136 goto err4; 1137 } 1138 udelay(10); 1139 } 1140 1141 fec_reg_setup(fec); 1142 fec_set_dev_name(edev->name, dev_id); 1143 fec->dev_id = (dev_id == -1) ? 0 : dev_id; 1144 fec->bus = bus; 1145 fec_mii_setspeed(bus->priv); 1146 #ifdef CONFIG_PHYLIB 1147 fec->phydev = phydev; 1148 phy_connect_dev(phydev, edev); 1149 /* Configure phy */ 1150 phy_config(phydev); 1151 #else 1152 fec->phy_id = phy_id; 1153 #endif 1154 eth_register(edev); 1155 /* only support one eth device, the index number pointed by dev_id */ 1156 edev->index = fec->dev_id; 1157 1158 if (fec_get_hwaddr(fec->dev_id, ethaddr) == 0) { 1159 debug("got MAC%d address from fuse: %pM\n", fec->dev_id, ethaddr); 1160 memcpy(edev->enetaddr, ethaddr, 6); 1161 if (fec->dev_id) 1162 sprintf(mac, "eth%daddr", fec->dev_id); 1163 else 1164 strcpy(mac, "ethaddr"); 1165 if (!env_get(mac)) 1166 eth_env_set_enetaddr(mac, ethaddr); 1167 } 1168 return ret; 1169 err4: 1170 fec_free_descs(fec); 1171 err3: 1172 free(fec); 1173 err2: 1174 free(edev); 1175 err1: 1176 return ret; 1177 } 1178 1179 int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr) 1180 { 1181 uint32_t base_mii; 1182 struct mii_dev *bus = NULL; 1183 #ifdef CONFIG_PHYLIB 1184 struct phy_device *phydev = NULL; 1185 #endif 1186 int ret; 1187 1188 #ifdef CONFIG_FEC_MXC_MDIO_BASE 1189 /* 1190 * The i.MX28 has two ethernet interfaces, but they are not equal. 1191 * Only the first one can access the MDIO bus. 1192 */ 1193 base_mii = CONFIG_FEC_MXC_MDIO_BASE; 1194 #else 1195 base_mii = addr; 1196 #endif 1197 debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr); 1198 bus = fec_get_miibus(base_mii, dev_id); 1199 if (!bus) 1200 return -ENOMEM; 1201 #ifdef CONFIG_PHYLIB 1202 phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII); 1203 if (!phydev) { 1204 mdio_unregister(bus); 1205 free(bus); 1206 return -ENOMEM; 1207 } 1208 ret = fec_probe(bd, dev_id, addr, bus, phydev); 1209 #else 1210 ret = fec_probe(bd, dev_id, addr, bus, phy_id); 1211 #endif 1212 if (ret) { 1213 #ifdef CONFIG_PHYLIB 1214 free(phydev); 1215 #endif 1216 mdio_unregister(bus); 1217 free(bus); 1218 } 1219 return ret; 1220 } 1221 1222 #ifdef CONFIG_FEC_MXC_PHYADDR 1223 int fecmxc_initialize(bd_t *bd) 1224 { 1225 return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR, 1226 IMX_FEC_BASE); 1227 } 1228 #endif 1229 1230 #ifndef CONFIG_PHYLIB 1231 int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int)) 1232 { 1233 struct fec_priv *fec = (struct fec_priv *)dev->priv; 1234 fec->mii_postcall = cb; 1235 return 0; 1236 } 1237 #endif 1238 1239 #else 1240 1241 static int fecmxc_read_rom_hwaddr(struct udevice *dev) 1242 { 1243 struct fec_priv *priv = dev_get_priv(dev); 1244 struct eth_pdata *pdata = dev_get_platdata(dev); 1245 1246 return fec_get_hwaddr(priv->dev_id, pdata->enetaddr); 1247 } 1248 1249 static int fecmxc_free_pkt(struct udevice *dev, uchar *packet, int length) 1250 { 1251 if (packet) 1252 free(packet); 1253 1254 return 0; 1255 } 1256 1257 static const struct eth_ops fecmxc_ops = { 1258 .start = fecmxc_init, 1259 .send = fecmxc_send, 1260 .recv = fecmxc_recv, 1261 .free_pkt = fecmxc_free_pkt, 1262 .stop = fecmxc_halt, 1263 .write_hwaddr = fecmxc_set_hwaddr, 1264 .read_rom_hwaddr = fecmxc_read_rom_hwaddr, 1265 }; 1266 1267 static int device_get_phy_addr(struct udevice *dev) 1268 { 1269 struct ofnode_phandle_args phandle_args; 1270 int reg; 1271 1272 if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1273 &phandle_args)) { 1274 debug("Failed to find phy-handle"); 1275 return -ENODEV; 1276 } 1277 1278 reg = ofnode_read_u32_default(phandle_args.node, "reg", 0); 1279 1280 return reg; 1281 } 1282 1283 static int fec_phy_init(struct fec_priv *priv, struct udevice *dev) 1284 { 1285 struct phy_device *phydev; 1286 int addr; 1287 1288 addr = device_get_phy_addr(dev); 1289 #ifdef CONFIG_FEC_MXC_PHYADDR 1290 addr = CONFIG_FEC_MXC_PHYADDR; 1291 #endif 1292 1293 phydev = phy_connect(priv->bus, addr, dev, priv->interface); 1294 if (!phydev) 1295 return -ENODEV; 1296 1297 priv->phydev = phydev; 1298 phy_config(phydev); 1299 1300 return 0; 1301 } 1302 1303 #ifdef CONFIG_DM_GPIO 1304 /* FEC GPIO reset */ 1305 static void fec_gpio_reset(struct fec_priv *priv) 1306 { 1307 debug("fec_gpio_reset: fec_gpio_reset(dev)\n"); 1308 if (dm_gpio_is_valid(&priv->phy_reset_gpio)) { 1309 dm_gpio_set_value(&priv->phy_reset_gpio, 1); 1310 mdelay(priv->reset_delay); 1311 dm_gpio_set_value(&priv->phy_reset_gpio, 0); 1312 if (priv->reset_post_delay) 1313 mdelay(priv->reset_post_delay); 1314 } 1315 } 1316 #endif 1317 1318 static int fecmxc_probe(struct udevice *dev) 1319 { 1320 struct eth_pdata *pdata = dev_get_platdata(dev); 1321 struct fec_priv *priv = dev_get_priv(dev); 1322 struct mii_dev *bus = NULL; 1323 uint32_t start; 1324 int ret; 1325 1326 if (IS_ENABLED(CONFIG_IMX8)) { 1327 ret = clk_get_by_name(dev, "ipg", &priv->ipg_clk); 1328 if (ret < 0) { 1329 debug("Can't get FEC ipg clk: %d\n", ret); 1330 return ret; 1331 } 1332 ret = clk_enable(&priv->ipg_clk); 1333 if (ret < 0) { 1334 debug("Can't enable FEC ipg clk: %d\n", ret); 1335 return ret; 1336 } 1337 1338 priv->clk_rate = clk_get_rate(&priv->ipg_clk); 1339 } 1340 1341 ret = fec_alloc_descs(priv); 1342 if (ret) 1343 return ret; 1344 1345 #ifdef CONFIG_DM_REGULATOR 1346 if (priv->phy_supply) { 1347 ret = regulator_set_enable(priv->phy_supply, true); 1348 if (ret) { 1349 printf("%s: Error enabling phy supply\n", dev->name); 1350 return ret; 1351 } 1352 } 1353 #endif 1354 1355 #ifdef CONFIG_DM_GPIO 1356 fec_gpio_reset(priv); 1357 #endif 1358 /* Reset chip. */ 1359 writel(readl(&priv->eth->ecntrl) | FEC_ECNTRL_RESET, 1360 &priv->eth->ecntrl); 1361 start = get_timer(0); 1362 while (readl(&priv->eth->ecntrl) & FEC_ECNTRL_RESET) { 1363 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) { 1364 printf("FEC MXC: Timeout reseting chip\n"); 1365 goto err_timeout; 1366 } 1367 udelay(10); 1368 } 1369 1370 fec_reg_setup(priv); 1371 1372 priv->dev_id = dev->seq; 1373 #ifdef CONFIG_FEC_MXC_MDIO_BASE 1374 bus = fec_get_miibus((ulong)CONFIG_FEC_MXC_MDIO_BASE, dev->seq); 1375 #else 1376 bus = fec_get_miibus((ulong)priv->eth, dev->seq); 1377 #endif 1378 if (!bus) { 1379 ret = -ENOMEM; 1380 goto err_mii; 1381 } 1382 1383 priv->bus = bus; 1384 priv->interface = pdata->phy_interface; 1385 switch (priv->interface) { 1386 case PHY_INTERFACE_MODE_MII: 1387 priv->xcv_type = MII100; 1388 break; 1389 case PHY_INTERFACE_MODE_RMII: 1390 priv->xcv_type = RMII; 1391 break; 1392 case PHY_INTERFACE_MODE_RGMII: 1393 case PHY_INTERFACE_MODE_RGMII_ID: 1394 case PHY_INTERFACE_MODE_RGMII_RXID: 1395 case PHY_INTERFACE_MODE_RGMII_TXID: 1396 priv->xcv_type = RGMII; 1397 break; 1398 default: 1399 priv->xcv_type = CONFIG_FEC_XCV_TYPE; 1400 printf("Unsupported interface type %d defaulting to %d\n", 1401 priv->interface, priv->xcv_type); 1402 break; 1403 } 1404 1405 ret = fec_phy_init(priv, dev); 1406 if (ret) 1407 goto err_phy; 1408 1409 return 0; 1410 1411 err_phy: 1412 mdio_unregister(bus); 1413 free(bus); 1414 err_mii: 1415 err_timeout: 1416 fec_free_descs(priv); 1417 return ret; 1418 } 1419 1420 static int fecmxc_remove(struct udevice *dev) 1421 { 1422 struct fec_priv *priv = dev_get_priv(dev); 1423 1424 free(priv->phydev); 1425 fec_free_descs(priv); 1426 mdio_unregister(priv->bus); 1427 mdio_free(priv->bus); 1428 1429 #ifdef CONFIG_DM_REGULATOR 1430 if (priv->phy_supply) 1431 regulator_set_enable(priv->phy_supply, false); 1432 #endif 1433 1434 return 0; 1435 } 1436 1437 static int fecmxc_ofdata_to_platdata(struct udevice *dev) 1438 { 1439 int ret = 0; 1440 struct eth_pdata *pdata = dev_get_platdata(dev); 1441 struct fec_priv *priv = dev_get_priv(dev); 1442 const char *phy_mode; 1443 1444 pdata->iobase = (phys_addr_t)devfdt_get_addr(dev); 1445 priv->eth = (struct ethernet_regs *)pdata->iobase; 1446 1447 pdata->phy_interface = -1; 1448 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1449 NULL); 1450 if (phy_mode) 1451 pdata->phy_interface = phy_get_interface_by_name(phy_mode); 1452 if (pdata->phy_interface == -1) { 1453 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode); 1454 return -EINVAL; 1455 } 1456 1457 #ifdef CONFIG_DM_REGULATOR 1458 device_get_supply_regulator(dev, "phy-supply", &priv->phy_supply); 1459 #endif 1460 1461 #ifdef CONFIG_DM_GPIO 1462 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1463 &priv->phy_reset_gpio, GPIOD_IS_OUT); 1464 if (ret < 0) 1465 return 0; /* property is optional, don't return error! */ 1466 1467 priv->reset_delay = dev_read_u32_default(dev, "phy-reset-duration", 1); 1468 if (priv->reset_delay > 1000) { 1469 printf("FEC MXC: phy reset duration should be <= 1000ms\n"); 1470 /* property value wrong, use default value */ 1471 priv->reset_delay = 1; 1472 } 1473 1474 priv->reset_post_delay = dev_read_u32_default(dev, 1475 "phy-reset-post-delay", 1476 0); 1477 if (priv->reset_post_delay > 1000) { 1478 printf("FEC MXC: phy reset post delay should be <= 1000ms\n"); 1479 /* property value wrong, use default value */ 1480 priv->reset_post_delay = 0; 1481 } 1482 #endif 1483 1484 return 0; 1485 } 1486 1487 static const struct udevice_id fecmxc_ids[] = { 1488 { .compatible = "fsl,imx6q-fec" }, 1489 { .compatible = "fsl,imx6sl-fec" }, 1490 { .compatible = "fsl,imx6sx-fec" }, 1491 { .compatible = "fsl,imx6ul-fec" }, 1492 { .compatible = "fsl,imx53-fec" }, 1493 { .compatible = "fsl,imx7d-fec" }, 1494 { } 1495 }; 1496 1497 U_BOOT_DRIVER(fecmxc_gem) = { 1498 .name = "fecmxc", 1499 .id = UCLASS_ETH, 1500 .of_match = fecmxc_ids, 1501 .ofdata_to_platdata = fecmxc_ofdata_to_platdata, 1502 .probe = fecmxc_probe, 1503 .remove = fecmxc_remove, 1504 .ops = &fecmxc_ops, 1505 .priv_auto_alloc_size = sizeof(struct fec_priv), 1506 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 1507 }; 1508 #endif 1509