// SPDX-License-Identifier: GPL-2.0
/*
 * Opencore 10/100 ethernet mac driver
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *   Thierry Reding <thierry.reding@avionic-design.de>
 * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#include <common.h>
#include <dm.h>
#include <dm/platform_data/net_ethoc.h>
#include <linux/io.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include <asm/cache.h>
#include <wait_bit.h>

/* register offsets */
#define	MODER		0x00
#define	INT_SOURCE	0x04
#define	INT_MASK	0x08
#define	IPGT		0x0c
#define	IPGR1		0x10
#define	IPGR2		0x14
#define	PACKETLEN	0x18
#define	COLLCONF	0x1c
#define	TX_BD_NUM	0x20
#define	CTRLMODER	0x24
#define	MIIMODER	0x28
#define	MIICOMMAND	0x2c
#define	MIIADDRESS	0x30
#define	MIITX_DATA	0x34
#define	MIIRX_DATA	0x38
#define	MIISTATUS	0x3c
#define	MAC_ADDR0	0x40
#define	MAC_ADDR1	0x44
#define	ETH_HASH0	0x48
#define	ETH_HASH1	0x4c
#define	ETH_TXCTRL	0x50

/* mode register */
#define	MODER_RXEN	(1 << 0)	/* receive enable */
#define	MODER_TXEN	(1 << 1)	/* transmit enable */
#define	MODER_NOPRE	(1 << 2)	/* no preamble */
#define	MODER_BRO	(1 << 3)	/* broadcast address */
#define	MODER_IAM	(1 << 4)	/* individual address mode */
#define	MODER_PRO	(1 << 5)	/* promiscuous mode */
#define	MODER_IFG	(1 << 6)	/* interframe gap for incoming frames */
#define	MODER_LOOP	(1 << 7)	/* loopback */
#define	MODER_NBO	(1 << 8)	/* no back-off */
#define	MODER_EDE	(1 << 9)	/* excess defer enable */
#define	MODER_FULLD	(1 << 10)	/* full duplex */
#define	MODER_RESET	(1 << 11)	/* FIXME: reset (undocumented) */
#define	MODER_DCRC	(1 << 12)	/* delayed CRC enable */
#define	MODER_CRC	(1 << 13)	/* CRC enable */
#define	MODER_HUGE	(1 << 14)	/* huge packets enable */
#define	MODER_PAD	(1 << 15)	/* padding enabled */
#define	MODER_RSM	(1 << 16)	/* receive small packets */

/* interrupt source and mask registers */
#define	INT_MASK_TXF	(1 << 0)	/* transmit frame */
#define	INT_MASK_TXE	(1 << 1)	/* transmit error */
#define	INT_MASK_RXF	(1 << 2)	/* receive frame */
#define	INT_MASK_RXE	(1 << 3)	/* receive error */
#define	INT_MASK_BUSY	(1 << 4)
#define	INT_MASK_TXC	(1 << 5)	/* transmit control frame */
#define	INT_MASK_RXC	(1 << 6)	/* receive control frame */

#define	INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define	INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define	INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define	PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define	PACKETLEN_MAX(max)		(((max) & 0xffff) << 0)
#define	PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))

/* transmit buffer number register */
#define	TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define	CTRLMODER_PASSALL	(1 << 0)	/* pass all receive frames */
#define	CTRLMODER_RXFLOW	(1 << 1)	/* receive control flow */
#define	CTRLMODER_TXFLOW	(1 << 2)	/* transmit control flow */

/* MII mode register */
#define	MIIMODER_CLKDIV(x)	((x) & 0xfe)	/* needs to be an even number */
#define	MIIMODER_NOPRE		(1 << 8)	/* no preamble */

/* MII command register */
#define	MIICOMMAND_SCAN		(1 << 0)	/* scan status */
#define	MIICOMMAND_READ		(1 << 1)	/* read status */
#define	MIICOMMAND_WRITE	(1 << 2)	/* write control data */

/* MII address register */
#define	MIIADDRESS_FIAD(x)		(((x) & 0x1f) << 0)
#define	MIIADDRESS_RGAD(x)		(((x) & 0x1f) << 8)
#define	MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define	MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define	MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define	MIISTATUS_LINKFAIL	(1 << 0)
#define	MIISTATUS_BUSY		(1 << 1)
#define	MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define	TX_BD_CS		(1 << 0)	/* carrier sense lost */
#define	TX_BD_DF		(1 << 1)	/* defer indication */
#define	TX_BD_LC		(1 << 2)	/* late collision */
#define	TX_BD_RL		(1 << 3)	/* retransmission limit */
#define	TX_BD_RETRY_MASK	(0x00f0)
#define	TX_BD_RETRY(x)		(((x) & 0x00f0) >> 4)
#define	TX_BD_UR		(1 << 8)	/* transmitter underrun */
#define	TX_BD_CRC		(1 << 11)	/* TX CRC enable */
#define	TX_BD_PAD		(1 << 12)	/* pad enable */
#define	TX_BD_WRAP		(1 << 13)
#define	TX_BD_IRQ		(1 << 14)	/* interrupt request enable */
#define	TX_BD_READY		(1 << 15)	/* TX buffer ready */
#define	TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define	TX_BD_LEN_MASK		(0xffff << 16)

#define	TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define	RX_BD_LC	(1 << 0)	/* late collision */
#define	RX_BD_CRC	(1 << 1)	/* RX CRC error */
#define	RX_BD_SF	(1 << 2)	/* short frame */
#define	RX_BD_TL	(1 << 3)	/* too long */
#define	RX_BD_DN	(1 << 4)	/* dribble nibble */
#define	RX_BD_IS	(1 << 5)	/* invalid symbol */
#define	RX_BD_OR	(1 << 6)	/* receiver overrun */
#define	RX_BD_MISS	(1 << 7)
#define	RX_BD_CF	(1 << 8)	/* control frame */
#define	RX_BD_WRAP	(1 << 13)
#define	RX_BD_IRQ	(1 << 14)	/* interrupt request enable */
#define	RX_BD_EMPTY	(1 << 15)
#define	RX_BD_LEN(x)	(((x) & 0xffff) << 16)

#define	RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define	ETHOC_BUFSIZ		1536
#define	ETHOC_ZLEN		64
#define	ETHOC_BD_BASE		0x400
#define	ETHOC_TIMEOUT		(HZ / 2)
#define	ETHOC_MII_TIMEOUT	(1 + (HZ / 5))
#define	ETHOC_IOSIZE		0x54

/**
 * struct ethoc - driver-private device structure
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 */
struct ethoc {
	u32 num_tx;
	u32 cur_tx;
	u32 dty_tx;
	u32 num_rx;
	u32 cur_rx;
	void __iomem *iobase;
	void __iomem *packet;
	phys_addr_t packet_phys;

#ifdef CONFIG_PHYLIB
	struct mii_dev *bus;
	struct phy_device *phydev;
#endif
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:	buffer statistics
 * @addr:	physical memory address
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};

static inline u32 *ethoc_reg(struct ethoc *priv, size_t offset)
{
	return priv->iobase + offset;
}

static inline u32 ethoc_read(struct ethoc *priv, size_t offset)
{
	return readl(ethoc_reg(priv, offset));
}

static inline void ethoc_write(struct ethoc *priv, size_t offset, u32 data)
{
	writel(data, ethoc_reg(priv, offset));
}

static inline void ethoc_read_bd(struct ethoc *priv, int index,
				 struct ethoc_bd *bd)
{
	size_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	bd->stat = ethoc_read(priv, offset + 0);
	bd->addr = ethoc_read(priv, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *priv, int index,
				  const struct ethoc_bd *bd)
{
	size_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(priv, offset + 0, bd->stat);
	ethoc_write(priv, offset + 4, bd->addr);
}

static int ethoc_write_hwaddr_common(struct ethoc *priv, u8 *mac)
{
	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
		    (mac[4] << 8) | (mac[5] << 0));
	ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
	return 0;
}

static inline void ethoc_ack_irq(struct ethoc *priv, u32 mask)
{
	ethoc_write(priv, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *priv)
{
	u32 mode = ethoc_read(priv, MODER);
	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(priv, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *priv)
{
	u32 mode = ethoc_read(priv, MODER);
	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(priv, MODER, mode);
}

static int ethoc_init_ring(struct ethoc *priv)
{
	struct ethoc_bd bd;
	phys_addr_t addr = priv->packet_phys;
	int i;

	priv->cur_tx = 0;
	priv->dty_tx = 0;
	priv->cur_rx = 0;

	/* setup transmission buffers */
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	bd.addr = 0;

	for (i = 0; i < priv->num_tx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		}
		if (i == priv->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(priv, i, &bd);
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < priv->num_rx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		} else {
			bd.addr = virt_to_phys(net_rx_packets[i]);
		}
		if (i == priv->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		flush_dcache_range((ulong)net_rx_packets[i],
				   (ulong)net_rx_packets[i] + PKTSIZE_ALIGN);
		ethoc_write_bd(priv, priv->num_tx + i, &bd);
	}

	return 0;
}

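/*
 * Bring the MAC into a known state: enable CRC generation and automatic
 * padding, force full-duplex operation, program the inter-packet gap,
 * acknowledge any pending interrupts, then enable the receiver and
 * transmitter.
 */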
static int ethoc_reset(struct ethoc *priv)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(priv);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(priv, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(priv, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(priv, MODER);
	mode |= MODER_FULLD;
	ethoc_write(priv, MODER, mode);
	ethoc_write(priv, IPGT, 0x15);

	ethoc_ack_irq(priv, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(priv);
	return 0;
}

static int ethoc_init_common(struct ethoc *priv)
{
	int ret = 0;

	priv->num_tx = 1;
	priv->num_rx = PKTBUFSRX;
	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
	ethoc_init_ring(priv);
	ethoc_reset(priv);

#ifdef CONFIG_PHYLIB
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}
#endif
	return ret;
}

static void ethoc_stop_common(struct ethoc *priv)
{
	ethoc_disable_rx_and_tx(priv);
#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif
}

static int ethoc_update_rx_stats(struct ethoc_bd *bd)
{
	int ret = 0;

	if (bd->stat & RX_BD_TL) {
		debug("ETHOC: " "RX: frame too long\n");
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		debug("ETHOC: " "RX: frame too short\n");
		ret++;
	}

	if (bd->stat & RX_BD_DN)
		debug("ETHOC: " "RX: dribble nibble\n");

	if (bd->stat & RX_BD_CRC) {
		debug("ETHOC: " "RX: wrong CRC\n");
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		debug("ETHOC: " "RX: overrun\n");
		ret++;
	}

	if (bd->stat & RX_BD_LC) {
		debug("ETHOC: " "RX: late collision\n");
		ret++;
	}

	return ret;
}

static int ethoc_rx_common(struct ethoc *priv, uchar **packetp)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;

	ethoc_read_bd(priv, entry, &bd);
	if (bd.stat & RX_BD_EMPTY)
		return -EAGAIN;

	debug("%s(): RX buffer %d, %x received\n",
	      __func__, priv->cur_rx, bd.stat);
	if (ethoc_update_rx_stats(&bd) == 0) {
		int size = bd.stat >> 16;

		size -= 4;	/* strip the CRC */
		if (priv->packet)
			*packetp = priv->packet + entry * PKTSIZE_ALIGN;
		else
			*packetp = net_rx_packets[i];
		return size;
	} else {
		return 0;
	}
}

static int ethoc_is_new_packet_received(struct ethoc *priv)
{
	u32 pending;

	pending = ethoc_read(priv, INT_SOURCE);
	ethoc_ack_irq(priv, pending);
	if (pending & INT_MASK_BUSY)
		debug("%s(): packet dropped\n", __func__);
	if (pending & INT_MASK_RX) {
		debug("%s(): rx irq\n", __func__);
		return 1;
	}

	return 0;
}

static int ethoc_update_tx_stats(struct ethoc_bd *bd)
{
	if (bd->stat & TX_BD_LC)
		debug("ETHOC: " "TX: late collision\n");

	if (bd->stat & TX_BD_RL)
		debug("ETHOC: " "TX: retransmit limit\n");

	if (bd->stat & TX_BD_UR)
		debug("ETHOC: " "TX: underrun\n");

	if (bd->stat & TX_BD_CS)
		debug("ETHOC: " "TX: carrier sense lost\n");

	return 0;
}

static void ethoc_tx(struct ethoc *priv)
{
	u32 entry = priv->dty_tx % priv->num_tx;
	struct ethoc_bd bd;

	ethoc_read_bd(priv, entry, &bd);
	if ((bd.stat & TX_BD_READY) == 0)
		(void)ethoc_update_tx_stats(&bd);
}

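/*
 * Transmit one frame: the data is either copied into the dedicated packet
 * buffer (when one was mapped at probe time) or handed to the MAC by
 * physical address, the descriptor length and READY bits are set, and the
 * function then polls INT_SOURCE until a TX interrupt arrives or the
 * timeout expires.
 */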
static int ethoc_send_common(struct ethoc *priv, void *packet, int length)
{
	struct ethoc_bd bd;
	u32 entry;
	u32 pending;
	int tmo;

	entry = priv->cur_tx % priv->num_tx;
	ethoc_read_bd(priv, entry, &bd);
	if (unlikely(length < ETHOC_ZLEN))
		bd.stat |= TX_BD_PAD;
	else
		bd.stat &= ~TX_BD_PAD;

	if (priv->packet) {
		void *p = priv->packet + entry * PKTSIZE_ALIGN;

		memcpy(p, packet, length);
		packet = p;
	} else {
		bd.addr = virt_to_phys(packet);
	}
	flush_dcache_range((ulong)packet, (ulong)packet + length);
	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
	bd.stat |= TX_BD_LEN(length);
	ethoc_write_bd(priv, entry, &bd);

	/* start transmit */
	bd.stat |= TX_BD_READY;
	ethoc_write_bd(priv, entry, &bd);

	/* wait for transfer to succeed */
	tmo = get_timer(0) + 5 * CONFIG_SYS_HZ;
	while (1) {
		pending = ethoc_read(priv, INT_SOURCE);
		ethoc_ack_irq(priv, pending & ~INT_MASK_RX);
		if (pending & INT_MASK_BUSY)
			debug("%s(): packet dropped\n", __func__);

		if (pending & INT_MASK_TX) {
			ethoc_tx(priv);
			break;
		}
		if (get_timer(0) >= tmo) {
			debug("%s(): timed out\n", __func__);
			return -1;
		}
	}

	debug("%s(): packet sent\n", __func__);
	return 0;
}

static int ethoc_free_pkt_common(struct ethoc *priv)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;
	void *src;

	ethoc_read_bd(priv, entry, &bd);

	if (priv->packet)
		src = priv->packet + entry * PKTSIZE_ALIGN;
	else
		src = net_rx_packets[i];
	/* clear the buffer descriptor so it can be reused */
	flush_dcache_range((ulong)src,
			   (ulong)src + PKTSIZE_ALIGN);
	bd.stat &= ~RX_BD_STATS;
	bd.stat |= RX_BD_EMPTY;
	ethoc_write_bd(priv, entry, &bd);
	priv->cur_rx++;

	return 0;
}

#ifdef CONFIG_PHYLIB

static int ethoc_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct ethoc *priv = bus->priv;
	int rc;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(addr, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	rc = wait_for_bit_le32(ethoc_reg(priv, MIISTATUS),
			       MIISTATUS_BUSY, false, CONFIG_SYS_HZ, false);

	if (rc == 0) {
		u32 data = ethoc_read(priv, MIIRX_DATA);

		/* reset MII command register */
		ethoc_write(priv, MIICOMMAND, 0);
		return data;
	}
	return rc;
}

static int ethoc_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			    u16 val)
{
	struct ethoc *priv = bus->priv;
	int rc;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(addr, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	rc = wait_for_bit_le32(ethoc_reg(priv, MIISTATUS),
			       MIISTATUS_BUSY, false, CONFIG_SYS_HZ, false);

	if (rc == 0) {
		/* reset MII command register */
		ethoc_write(priv, MIICOMMAND, 0);
	}
	return rc;
}

static int ethoc_mdio_init(const char *name, struct ethoc *priv)
{
	struct mii_dev *bus = mdio_alloc();
	int ret;

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = ethoc_mdio_read;
	bus->write = ethoc_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
	bus->priv = priv;

	ret = mdio_register(bus);
	if (ret < 0)
		return ret;

	priv->bus = miiphy_get_dev_by_name(name);
	return 0;
}

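/*
 * Find and attach a PHY on the MDIO bus registered by ethoc_mdio_init().
 * When CONFIG_PHY_ADDR is defined the search is limited to that single
 * address; the PHY is then restricted to basic 10/100 features and
 * configured via phy_config().
 */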
static int ethoc_phy_init(struct ethoc *priv, void *dev)
{
	struct phy_device *phydev;
	int mask = 0xffffffff;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, PHY_INTERFACE_MODE_MII);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_BASIC_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#else

static inline int ethoc_mdio_init(const char *name, struct ethoc *priv)
{
	return 0;
}

static inline int ethoc_phy_init(struct ethoc *priv, void *dev)
{
	return 0;
}

#endif

#ifdef CONFIG_DM_ETH

static int ethoc_write_hwaddr(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	struct ethoc *priv = dev_get_priv(dev);
	u8 *mac = pdata->eth_pdata.enetaddr;

	return ethoc_write_hwaddr_common(priv, mac);
}

static int ethoc_send(struct udevice *dev, void *packet, int length)
{
	return ethoc_send_common(dev_get_priv(dev), packet, length);
}

static int ethoc_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	return ethoc_free_pkt_common(dev_get_priv(dev));
}

static int ethoc_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ethoc *priv = dev_get_priv(dev);

	if (flags & ETH_RECV_CHECK_DEVICE)
		if (!ethoc_is_new_packet_received(priv))
			return -EAGAIN;

	return ethoc_rx_common(priv, packetp);
}

static int ethoc_start(struct udevice *dev)
{
	return ethoc_init_common(dev_get_priv(dev));
}

static void ethoc_stop(struct udevice *dev)
{
	ethoc_stop_common(dev_get_priv(dev));
}

static int ethoc_ofdata_to_platdata(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	fdt_addr_t addr;

	pdata->eth_pdata.iobase = devfdt_get_addr(dev);
	addr = devfdt_get_addr_index(dev, 1);
	if (addr != FDT_ADDR_T_NONE)
		pdata->packet_base = addr;
	return 0;
}

static int ethoc_probe(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	struct ethoc *priv = dev_get_priv(dev);

	priv->iobase = ioremap(pdata->eth_pdata.iobase, ETHOC_IOSIZE);
	if (pdata->packet_base) {
		priv->packet_phys = pdata->packet_base;
		priv->packet = ioremap(pdata->packet_base,
				       (1 + PKTBUFSRX) * PKTSIZE_ALIGN);
	}

	ethoc_mdio_init(dev->name, priv);
	ethoc_phy_init(priv, dev);

	return 0;
}

static int ethoc_remove(struct udevice *dev)
{
	struct ethoc *priv = dev_get_priv(dev);

#ifdef CONFIG_PHYLIB
	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
#endif
	iounmap(priv->iobase);
	return 0;
}

static const struct eth_ops ethoc_ops = {
	.start		= ethoc_start,
	.stop		= ethoc_stop,
	.send		= ethoc_send,
	.recv		= ethoc_recv,
	.free_pkt	= ethoc_free_pkt,
	.write_hwaddr	= ethoc_write_hwaddr,
};

static const struct udevice_id ethoc_ids[] = {
	{ .compatible = "opencores,ethoc" },
	{ }
};

U_BOOT_DRIVER(ethoc) = {
	.name				= "ethoc",
	.id				= UCLASS_ETH,
	.of_match			= ethoc_ids,
	.ofdata_to_platdata		= ethoc_ofdata_to_platdata,
	.probe				= ethoc_probe,
	.remove				= ethoc_remove,
	.ops				= &ethoc_ops,
	.priv_auto_alloc_size		= sizeof(struct ethoc),
	.platdata_auto_alloc_size	= sizeof(struct ethoc_eth_pdata),
};

#else
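/* Legacy (non-CONFIG_DM_ETH) interface using the eth_device callbacks. */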

static int ethoc_init(struct eth_device *dev, bd_t *bd)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;

	return ethoc_init_common(priv);
}

static int ethoc_write_hwaddr(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	u8 *mac = dev->enetaddr;

	return ethoc_write_hwaddr_common(priv, mac);
}

static int ethoc_send(struct eth_device *dev, void *packet, int length)
{
	return ethoc_send_common(dev->priv, packet, length);
}

static void ethoc_halt(struct eth_device *dev)
{
	ethoc_disable_rx_and_tx(dev->priv);
}

static int ethoc_recv(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;

	if (!ethoc_is_new_packet_received(priv))
		return 0;

	for (count = 0; count < PKTBUFSRX; ++count) {
		uchar *packetp;
		int size = ethoc_rx_common(priv, &packetp);

		if (size < 0)
			break;
		if (size > 0)
			net_process_received_packet(packetp, size);
		ethoc_free_pkt_common(priv);
	}
	return 0;
}

int ethoc_initialize(u8 dev_num, int base_addr)
{
	struct ethoc *priv;
	struct eth_device *dev;

	priv = malloc(sizeof(*priv));
	if (!priv)
		return 0;
	dev = malloc(sizeof(*dev));
	if (!dev) {
		free(priv);
		return 0;
	}

	memset(dev, 0, sizeof(*dev));
	dev->priv = priv;
	dev->iobase = base_addr;
	dev->init = ethoc_init;
	dev->halt = ethoc_halt;
	dev->send = ethoc_send;
	dev->recv = ethoc_recv;
	dev->write_hwaddr = ethoc_write_hwaddr;
	sprintf(dev->name, "%s-%hu", "ETHOC", dev_num);
	priv->iobase = ioremap(dev->iobase, ETHOC_IOSIZE);

	eth_register(dev);

	ethoc_mdio_init(dev->name, priv);
	ethoc_phy_init(priv, dev);

	return 1;
}

#endif