/*
 * Opencore 10/100 ethernet mac driver
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *   Thierry Reding <thierry.reding@avionic-design.de>
 * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
 * Copyright (C) 2016 Cadence Design Systems Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <dm/device.h>
#include <dm/platform_data/net_ethoc.h>
#include <linux/io.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include <asm/cache.h>
#include <wait_bit.h>

/* register offsets */
#define MODER		0x00
#define INT_SOURCE	0x04
#define INT_MASK	0x08
#define IPGT		0x0c
#define IPGR1		0x10
#define IPGR2		0x14
#define PACKETLEN	0x18
#define COLLCONF	0x1c
#define TX_BD_NUM	0x20
#define CTRLMODER	0x24
#define MIIMODER	0x28
#define MIICOMMAND	0x2c
#define MIIADDRESS	0x30
#define MIITX_DATA	0x34
#define MIIRX_DATA	0x38
#define MIISTATUS	0x3c
#define MAC_ADDR0	0x40
#define MAC_ADDR1	0x44
#define ETH_HASH0	0x48
#define ETH_HASH1	0x4c
#define ETH_TXCTRL	0x50

/* mode register */
#define MODER_RXEN	(1 << 0)	/* receive enable */
#define MODER_TXEN	(1 << 1)	/* transmit enable */
#define MODER_NOPRE	(1 << 2)	/* no preamble */
#define MODER_BRO	(1 << 3)	/* broadcast address */
#define MODER_IAM	(1 << 4)	/* individual address mode */
#define MODER_PRO	(1 << 5)	/* promiscuous mode */
#define MODER_IFG	(1 << 6)	/* interframe gap for incoming frames */
#define MODER_LOOP	(1 << 7)	/* loopback */
#define MODER_NBO	(1 << 8)	/* no back-off */
#define MODER_EDE	(1 << 9)	/* excess defer enable */
#define MODER_FULLD	(1 << 10)	/* full duplex */
#define MODER_RESET	(1 << 11)	/* FIXME: reset (undocumented) */
#define MODER_DCRC	(1 << 12)	/* delayed CRC enable */
#define MODER_CRC	(1 << 13)	/* CRC enable */
#define MODER_HUGE	(1 << 14)	/* huge packets enable */
#define MODER_PAD	(1 << 15)	/* padding enabled */
#define MODER_RSM	(1 << 16)	/* receive small packets */

/* interrupt source and mask registers */
#define INT_MASK_TXF	(1 << 0)	/* transmit frame */
#define INT_MASK_TXE	(1 << 1)	/* transmit error */
#define INT_MASK_RXF	(1 << 2)	/* receive frame */
#define INT_MASK_RXE	(1 << 3)	/* receive error */
#define INT_MASK_BUSY	(1 << 4)
#define INT_MASK_TXC	(1 << 5)	/* transmit control frame */
#define INT_MASK_RXC	(1 << 6)	/* receive control frame */

#define INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max)		(((max) & 0xffff) << 0)
#define PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))

/* transmit buffer number register */
#define TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL	(1 << 0)	/* pass all receive frames */
#define CTRLMODER_RXFLOW	(1 << 1)	/* receive control flow */
#define CTRLMODER_TXFLOW	(1 << 2)	/* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x)	((x) & 0xfe)	/* needs to be an even number */
#define MIIMODER_NOPRE		(1 << 8)	/* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN		(1 << 0)	/* scan status */
#define MIICOMMAND_READ		(1 << 1)	/* read status */
#define MIICOMMAND_WRITE	(1 << 2)	/* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x)		(((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x)		(((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL	(1 << 0)
#define MIISTATUS_BUSY		(1 << 1)
#define MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS		(1 << 0)	/* carrier sense lost */
#define TX_BD_DF		(1 << 1)	/* defer indication */
#define TX_BD_LC		(1 << 2)	/* late collision */
#define TX_BD_RL		(1 << 3)	/* retransmission limit */
#define TX_BD_RETRY_MASK	(0x00f0)
#define TX_BD_RETRY(x)		(((x) & 0x00f0) >> 4)
#define TX_BD_UR		(1 << 8)	/* transmitter underrun */
#define TX_BD_CRC		(1 << 11)	/* TX CRC enable */
#define TX_BD_PAD		(1 << 12)	/* pad enable */
#define TX_BD_WRAP		(1 << 13)
#define TX_BD_IRQ		(1 << 14)	/* interrupt request enable */
#define TX_BD_READY		(1 << 15)	/* TX buffer ready */
#define TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK		(0xffff << 16)

#define TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define RX_BD_LC	(1 << 0)	/* late collision */
#define RX_BD_CRC	(1 << 1)	/* RX CRC error */
#define RX_BD_SF	(1 << 2)	/* short frame */
#define RX_BD_TL	(1 << 3)	/* too long */
#define RX_BD_DN	(1 << 4)	/* dribble nibble */
#define RX_BD_IS	(1 << 5)	/* invalid symbol */
#define RX_BD_OR	(1 << 6)	/* receiver overrun */
#define RX_BD_MISS	(1 << 7)
#define RX_BD_CF	(1 << 8)	/* control frame */
#define RX_BD_WRAP	(1 << 13)
#define RX_BD_IRQ	(1 << 14)	/* interrupt request enable */
#define RX_BD_EMPTY	(1 << 15)
#define RX_BD_LEN(x)	(((x) & 0xffff) << 16)

#define RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define ETHOC_BUFSIZ		1536
#define ETHOC_ZLEN		64
#define ETHOC_BD_BASE		0x400
#define ETHOC_TIMEOUT		(HZ / 2)
#define ETHOC_MII_TIMEOUT	(1 + (HZ / 5))
#define ETHOC_IOSIZE		0x54

/**
 * struct ethoc - driver-private device structure
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 * @iobase:	remapped MAC register region
 * @packet:	remapped dedicated packet buffer region (optional)
 * @packet_phys: physical address of the dedicated packet buffers
 * @bus:	MDIO bus attached to this MAC
 * @phydev:	PHY device found on that bus
 */
struct ethoc {
	u32 num_tx;
	u32 cur_tx;
	u32 dty_tx;
	u32 num_rx;
	u32 cur_rx;
	void __iomem *iobase;
	void __iomem *packet;
	phys_addr_t packet_phys;

#ifdef CONFIG_PHYLIB
	struct mii_dev *bus;
	struct phy_device *phydev;
#endif
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:	buffer statistics
 * @addr:	physical memory address
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};

static inline u32 *ethoc_reg(struct ethoc *priv, size_t offset)
{
	return priv->iobase + offset;
}

static inline u32 ethoc_read(struct ethoc *priv, size_t offset)
{
	return readl(ethoc_reg(priv, offset));
}

static inline void ethoc_write(struct ethoc *priv, size_t offset, u32 data)
{
	writel(data, ethoc_reg(priv, offset));
}

static inline void ethoc_read_bd(struct ethoc *priv, int index,
				 struct ethoc_bd *bd)
{
	size_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));

	bd->stat = ethoc_read(priv, offset + 0);
	bd->addr = ethoc_read(priv, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *priv, int index,
				  const struct ethoc_bd *bd)
{
	size_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));

	ethoc_write(priv, offset + 0, bd->stat);
	ethoc_write(priv, offset + 4, bd->addr);
}

static int ethoc_write_hwaddr_common(struct ethoc *priv, u8 *mac)
{
	/* MAC bytes 2..5 live in MAC_ADDR0, bytes 0..1 in MAC_ADDR1 */
	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
		    (mac[4] << 8) | (mac[5] << 0));
	ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
	return 0;
}

static inline void ethoc_ack_irq(struct ethoc *priv, u32 mask)
{
	ethoc_write(priv, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *priv)
{
	u32 mode = ethoc_read(priv, MODER);

	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(priv, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *priv)
{
	u32 mode = ethoc_read(priv, MODER);

	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(priv, MODER, mode);
}

static int ethoc_init_ring(struct ethoc *priv)
{
	struct ethoc_bd bd;
	phys_addr_t addr = priv->packet_phys;
	int i;

	priv->cur_tx = 0;
	priv->dty_tx = 0;
	priv->cur_rx = 0;

	/* setup transmission buffers */
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	bd.addr = 0;

	for (i = 0; i < priv->num_tx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		}
		if (i == priv->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(priv, i, &bd);
	}

	/* setup receive buffers; RX descriptors follow the TX descriptors */
	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < priv->num_rx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		} else {
			bd.addr = virt_to_phys(net_rx_packets[i]);
		}
		if (i == priv->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		flush_dcache_range((ulong)net_rx_packets[i],
				   (ulong)net_rx_packets[i] + PKTSIZE_ALIGN);
		ethoc_write_bd(priv, priv->num_tx + i, &bd);
	}

	return 0;
}

static int ethoc_reset(struct ethoc *priv)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(priv);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(priv, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(priv, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(priv, MODER);
	mode |= MODER_FULLD;
	ethoc_write(priv, MODER, mode);
	ethoc_write(priv, IPGT, 0x15);

	ethoc_ack_irq(priv, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(priv);
	return 0;
}

static int ethoc_init_common(struct ethoc *priv)
{
	int ret = 0;

	priv->num_tx = 1;
	priv->num_rx = PKTBUFSRX;
	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
	ethoc_init_ring(priv);
	ethoc_reset(priv);

#ifdef CONFIG_PHYLIB
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}
#endif
	return ret;
}

static void ethoc_stop_common(struct ethoc *priv)
{
	ethoc_disable_rx_and_tx(priv);
#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif
}

static int ethoc_update_rx_stats(struct ethoc_bd *bd)
{
	int ret = 0;

	if (bd->stat & RX_BD_TL) {
		debug("ETHOC: " "RX: frame too long\n");
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		debug("ETHOC: " "RX: frame too short\n");
		ret++;
	}

	if (bd->stat & RX_BD_DN)
		debug("ETHOC: " "RX: dribble nibble\n");

	if (bd->stat & RX_BD_CRC) {
		debug("ETHOC: " "RX: wrong CRC\n");
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		debug("ETHOC: " "RX: overrun\n");
		ret++;
	}

	if (bd->stat & RX_BD_LC) {
		debug("ETHOC: " "RX: late collision\n");
		ret++;
	}

	return ret;
}

static int ethoc_rx_common(struct ethoc *priv, uchar **packetp)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;

	ethoc_read_bd(priv, entry, &bd);
	if (bd.stat & RX_BD_EMPTY)
		return -EAGAIN;

	debug("%s(): RX buffer %d, %x received\n",
	      __func__, priv->cur_rx, bd.stat);
	if (ethoc_update_rx_stats(&bd) == 0) {
		int size = bd.stat >> 16;

		size -= 4;	/* strip the CRC */
		if (priv->packet)
			*packetp = priv->packet + entry * PKTSIZE_ALIGN;
		else
			*packetp = net_rx_packets[i];
		return size;
	} else {
		return 0;
	}
}

static int ethoc_is_new_packet_received(struct ethoc *priv)
{
	u32 pending;

	pending = ethoc_read(priv, INT_SOURCE);
	ethoc_ack_irq(priv, pending);
	if (pending & INT_MASK_BUSY)
		debug("%s(): packet dropped\n", __func__);
	if (pending & INT_MASK_RX) {
		debug("%s(): rx irq\n", __func__);
		return 1;
	}

	return 0;
}

static int ethoc_update_tx_stats(struct ethoc_bd *bd)
{
	if (bd->stat & TX_BD_LC)
		debug("ETHOC: " "TX: late collision\n");

	if (bd->stat & TX_BD_RL)
		debug("ETHOC: " "TX: retransmit limit\n");

	if (bd->stat & TX_BD_UR)
		debug("ETHOC: " "TX: underrun\n");

	if (bd->stat & TX_BD_CS)
		debug("ETHOC: " "TX: carrier sense lost\n");

	return 0;
}

static void ethoc_tx(struct ethoc *priv)
{
	u32 entry = priv->dty_tx % priv->num_tx;
	struct ethoc_bd bd;

	ethoc_read_bd(priv, entry, &bd);
	if ((bd.stat & TX_BD_READY) == 0)
		(void)ethoc_update_tx_stats(&bd);
}

static int ethoc_send_common(struct ethoc *priv, void *packet, int length)
{
	struct ethoc_bd bd;
	u32 entry;
	u32 pending;
	int tmo;

	entry = priv->cur_tx % priv->num_tx;
	ethoc_read_bd(priv, entry, &bd);
	if (unlikely(length < ETHOC_ZLEN))
		bd.stat |= TX_BD_PAD;
	else
		bd.stat &= ~TX_BD_PAD;

	if (priv->packet) {
		void *p = priv->packet + entry * PKTSIZE_ALIGN;

		memcpy(p, packet, length);
		packet = p;
	} else {
		bd.addr = virt_to_phys(packet);
	}
	flush_dcache_range((ulong)packet, (ulong)packet + length);
	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
	bd.stat |= TX_BD_LEN(length);
	ethoc_write_bd(priv, entry, &bd);

	/* start transmit */
	bd.stat |= TX_BD_READY;
	ethoc_write_bd(priv, entry, &bd);

	/* wait for transfer to succeed */
	tmo = get_timer(0) + 5 * CONFIG_SYS_HZ;
	while (1) {
		pending = ethoc_read(priv, INT_SOURCE);
		ethoc_ack_irq(priv, pending & ~INT_MASK_RX);
		if (pending & INT_MASK_BUSY)
			debug("%s(): packet dropped\n", __func__);

		if (pending & INT_MASK_TX) {
			ethoc_tx(priv);
			break;
		}
		if (get_timer(0) >= tmo) {
			debug("%s(): timed out\n", __func__);
			return -1;
		}
	}

	debug("%s(): packet sent\n", __func__);
	return 0;
}

static int ethoc_free_pkt_common(struct ethoc *priv)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;
	void *src;

	ethoc_read_bd(priv, entry, &bd);

	if (priv->packet)
		src = priv->packet + entry * PKTSIZE_ALIGN;
	else
		src = net_rx_packets[i];
	/* clear the buffer descriptor so it can be reused */
	flush_dcache_range((ulong)src,
			   (ulong)src + PKTSIZE_ALIGN);
	bd.stat &= ~RX_BD_STATS;
	bd.stat |= RX_BD_EMPTY;
	ethoc_write_bd(priv, entry, &bd);
	priv->cur_rx++;

	return 0;
}

#ifdef CONFIG_PHYLIB

static int ethoc_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct ethoc *priv = bus->priv;
	int rc;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(addr, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	rc = wait_for_bit(__func__, ethoc_reg(priv, MIISTATUS),
			  MIISTATUS_BUSY, false, CONFIG_SYS_HZ, false);

	if (rc == 0) {
		u32 data = ethoc_read(priv, MIIRX_DATA);

		/* reset MII command register */
		ethoc_write(priv, MIICOMMAND, 0);
		return data;
	}
	return rc;
}

static int ethoc_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			    u16 val)
{
	struct ethoc *priv = bus->priv;
	int rc;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(addr, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	rc = wait_for_bit(__func__, ethoc_reg(priv, MIISTATUS),
			  MIISTATUS_BUSY, false, CONFIG_SYS_HZ, false);

	if (rc == 0) {
		/* reset MII command register */
		ethoc_write(priv, MIICOMMAND, 0);
	}
	return rc;
}

static int ethoc_mdio_init(const char *name, struct ethoc *priv)
{
	struct mii_dev *bus = mdio_alloc();
	int ret;

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = ethoc_mdio_read;
	bus->write = ethoc_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
	bus->priv = priv;

	ret = mdio_register(bus);
	if (ret < 0)
		return ret;

	priv->bus = miiphy_get_dev_by_name(name);
	return 0;
}

static int ethoc_phy_init(struct ethoc *priv, void *dev)
{
	struct phy_device *phydev;
	int mask = 0xffffffff;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, PHY_INTERFACE_MODE_MII);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_BASIC_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#else

static inline int ethoc_mdio_init(const char *name, struct ethoc *priv)
{
	return 0;
}

static inline int ethoc_phy_init(struct ethoc *priv, void *dev)
{
	return 0;
}

#endif

#ifdef CONFIG_DM_ETH

static int ethoc_write_hwaddr(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	struct ethoc *priv = dev_get_priv(dev);
	u8 *mac = pdata->eth_pdata.enetaddr;

	return ethoc_write_hwaddr_common(priv, mac);
}

static int ethoc_send(struct udevice *dev, void *packet, int length)
{
	return ethoc_send_common(dev_get_priv(dev), packet, length);
}

static int ethoc_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	return ethoc_free_pkt_common(dev_get_priv(dev));
}

static int ethoc_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ethoc *priv = dev_get_priv(dev);

	if (flags & ETH_RECV_CHECK_DEVICE)
		if (!ethoc_is_new_packet_received(priv))
			return -EAGAIN;

	return ethoc_rx_common(priv, packetp);
}

static int ethoc_start(struct udevice *dev)
{
	return ethoc_init_common(dev_get_priv(dev));
}

static void ethoc_stop(struct udevice *dev)
{
	ethoc_stop_common(dev_get_priv(dev));
}

static int ethoc_ofdata_to_platdata(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	fdt_addr_t addr;

	pdata->eth_pdata.iobase = dev_get_addr(dev);
	addr = dev_get_addr_index(dev, 1);
	if (addr != FDT_ADDR_T_NONE)
		pdata->packet_base = addr;
	return 0;
}

static int ethoc_probe(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	struct ethoc *priv = dev_get_priv(dev);

	priv->iobase = ioremap(pdata->eth_pdata.iobase, ETHOC_IOSIZE);
	if (pdata->packet_base) {
		priv->packet_phys = pdata->packet_base;
		priv->packet = ioremap(pdata->packet_base,
				       (1 + PKTBUFSRX) * PKTSIZE_ALIGN);
	}

	ethoc_mdio_init(dev->name, priv);
	ethoc_phy_init(priv, dev);

	return 0;
}

static int ethoc_remove(struct udevice *dev)
{
	struct ethoc *priv = dev_get_priv(dev);

#ifdef CONFIG_PHYLIB
	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
#endif
	iounmap(priv->iobase);
	return 0;
}

static const struct eth_ops ethoc_ops = {
	.start		= ethoc_start,
	.stop		= ethoc_stop,
	.send		= ethoc_send,
	.recv		= ethoc_recv,
	.free_pkt	= ethoc_free_pkt,
	.write_hwaddr	= ethoc_write_hwaddr,
};

static const struct udevice_id ethoc_ids[] = {
	{ .compatible = "opencores,ethoc" },
	{ }
};

U_BOOT_DRIVER(ethoc) = {
	.name = "ethoc",
	.id = UCLASS_ETH,
	.of_match = ethoc_ids,
	.ofdata_to_platdata = ethoc_ofdata_to_platdata,
	.probe = ethoc_probe,
	.remove = ethoc_remove,
	.ops = &ethoc_ops,
	.priv_auto_alloc_size = sizeof(struct ethoc),
	.platdata_auto_alloc_size = sizeof(struct ethoc_eth_pdata),
};

#else

static int ethoc_init(struct eth_device *dev, bd_t *bd)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;

	return ethoc_init_common(priv);
}

static int ethoc_write_hwaddr(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	u8 *mac = dev->enetaddr;

	return ethoc_write_hwaddr_common(priv, mac);
}

static int ethoc_send(struct eth_device *dev, void *packet, int length)
{
	return ethoc_send_common(dev->priv, packet, length);
}

static void ethoc_halt(struct eth_device *dev)
{
	ethoc_disable_rx_and_tx(dev->priv);
}

static int ethoc_recv(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;

	if (!ethoc_is_new_packet_received(priv))
		return 0;

	for (count = 0; count < PKTBUFSRX; ++count) {
		uchar *packetp;
		int size = ethoc_rx_common(priv, &packetp);

		if (size < 0)
			break;
		if (size > 0)
			net_process_received_packet(packetp, size);
		ethoc_free_pkt_common(priv);
	}
	return 0;
}

int ethoc_initialize(u8 dev_num, int base_addr)
{
	struct ethoc *priv;
	struct eth_device *dev;

	priv = malloc(sizeof(*priv));
	if (!priv)
		return 0;
	dev = malloc(sizeof(*dev));
	if (!dev) {
		free(priv);
		return 0;
	}

	memset(dev, 0, sizeof(*dev));
	dev->priv = priv;
	dev->iobase = base_addr;
	dev->init = ethoc_init;
	dev->halt = ethoc_halt;
	dev->send = ethoc_send;
	dev->recv = ethoc_recv;
	dev->write_hwaddr = ethoc_write_hwaddr;
	sprintf(dev->name, "%s-%hu", "ETHOC", dev_num);
	priv->iobase = ioremap(dev->iobase, ETHOC_IOSIZE);

	eth_register(dev);

	ethoc_mdio_init(dev->name, priv);
	ethoc_phy_init(priv, dev);

	return 1;
}

#endif
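
/*
 * Usage sketch (not part of the driver; addresses below are placeholders):
 *
 * Without CONFIG_DM_ETH, a board registers the MAC from its board_eth_init()
 * hook, passing the MAC's register base:
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return ethoc_initialize(0, 0xf0000000);
 *	}
 *
 * With CONFIG_DM_ETH, the device is bound from a device tree node instead;
 * the first reg entry is the register window (ETHOC_IOSIZE bytes) and the
 * optional second entry is a dedicated packet buffer region, e.g.:
 *
 *	ethernet@f0000000 {
 *		compatible = "opencores,ethoc";
 *		reg = <0xf0000000 0x54>, <0xf0400000 0x4000>;
 *	};
 */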