/*
 * Opencore 10/100 ethernet mac driver
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *   Thierry Reding <thierry.reding@avionic-design.de>
 * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
 * Copyright (C) 2016 Cadence Design Systems Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <dm/device.h>
#include <dm/platform_data/net_ethoc.h>
#include <linux/io.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include <asm/cache.h>

/* register offsets */
#define MODER		0x00
#define INT_SOURCE	0x04
#define INT_MASK	0x08
#define IPGT		0x0c
#define IPGR1		0x10
#define IPGR2		0x14
#define PACKETLEN	0x18
#define COLLCONF	0x1c
#define TX_BD_NUM	0x20
#define CTRLMODER	0x24
#define MIIMODER	0x28
#define MIICOMMAND	0x2c
#define MIIADDRESS	0x30
#define MIITX_DATA	0x34
#define MIIRX_DATA	0x38
#define MIISTATUS	0x3c
#define MAC_ADDR0	0x40
#define MAC_ADDR1	0x44
#define ETH_HASH0	0x48
#define ETH_HASH1	0x4c
#define ETH_TXCTRL	0x50

/* mode register */
#define MODER_RXEN	(1 << 0)	/* receive enable */
#define MODER_TXEN	(1 << 1)	/* transmit enable */
#define MODER_NOPRE	(1 << 2)	/* no preamble */
#define MODER_BRO	(1 << 3)	/* broadcast address */
#define MODER_IAM	(1 << 4)	/* individual address mode */
#define MODER_PRO	(1 << 5)	/* promiscuous mode */
#define MODER_IFG	(1 << 6)	/* interframe gap for incoming frames */
#define MODER_LOOP	(1 << 7)	/* loopback */
#define MODER_NBO	(1 << 8)	/* no back-off */
#define MODER_EDE	(1 << 9)	/* excess defer enable */
#define MODER_FULLD	(1 << 10)	/* full duplex */
#define MODER_RESET	(1 << 11)	/* FIXME: reset (undocumented) */
#define MODER_DCRC	(1 << 12)	/* delayed CRC enable */
#define MODER_CRC	(1 << 13)	/* CRC enable */
#define MODER_HUGE	(1 << 14)	/* huge packets enable */
#define MODER_PAD	(1 << 15)	/* padding enabled */
#define MODER_RSM	(1 << 16)	/* receive small packets */

/* interrupt source and mask registers */
#define INT_MASK_TXF	(1 << 0)	/* transmit frame */
#define INT_MASK_TXE	(1 << 1)	/* transmit error */
#define INT_MASK_RXF	(1 << 2)	/* receive frame */
#define INT_MASK_RXE	(1 << 3)	/* receive error */
#define INT_MASK_BUSY	(1 << 4)
#define INT_MASK_TXC	(1 << 5)	/* transmit control frame */
#define INT_MASK_RXC	(1 << 6)	/* receive control frame */

#define INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max)		(((max) & 0xffff) << 0)
#define PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))

/* transmit buffer number register */
#define TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL	(1 << 0)	/* pass all receive frames */
#define CTRLMODER_RXFLOW	(1 << 1)	/* receive control flow */
#define CTRLMODER_TXFLOW	(1 << 2)	/* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x)	((x) & 0xfe)	/* needs to be an even number */
#define MIIMODER_NOPRE		(1 << 8)	/* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN		(1 << 0)	/* scan status */
#define MIICOMMAND_READ		(1 << 1)	/* read status */
#define MIICOMMAND_WRITE	(1 << 2)	/* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x)	(((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x)	(((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL	(1 << 0)
#define MIISTATUS_BUSY		(1 << 1)
#define MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS		(1 << 0)	/* carrier sense lost */
#define TX_BD_DF		(1 << 1)	/* defer indication */
#define TX_BD_LC		(1 << 2)	/* late collision */
#define TX_BD_RL		(1 << 3)	/* retransmission limit */
#define TX_BD_RETRY_MASK	(0x00f0)
#define TX_BD_RETRY(x)		(((x) & 0x00f0) >> 4)
#define TX_BD_UR		(1 << 8)	/* transmitter underrun */
#define TX_BD_CRC		(1 << 11)	/* TX CRC enable */
#define TX_BD_PAD		(1 << 12)	/* pad enable */
#define TX_BD_WRAP		(1 << 13)
#define TX_BD_IRQ		(1 << 14)	/* interrupt request enable */
#define TX_BD_READY		(1 << 15)	/* TX buffer ready */
#define TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK		(0xffff << 16)

#define TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define RX_BD_LC	(1 << 0)	/* late collision */
#define RX_BD_CRC	(1 << 1)	/* RX CRC error */
#define RX_BD_SF	(1 << 2)	/* short frame */
#define RX_BD_TL	(1 << 3)	/* too long */
#define RX_BD_DN	(1 << 4)	/* dribble nibble */
#define RX_BD_IS	(1 << 5)	/* invalid symbol */
#define RX_BD_OR	(1 << 6)	/* receiver overrun */
#define RX_BD_MISS	(1 << 7)
#define RX_BD_CF	(1 << 8)	/* control frame */
#define RX_BD_WRAP	(1 << 13)
#define RX_BD_IRQ	(1 << 14)	/* interrupt request enable */
#define RX_BD_EMPTY	(1 << 15)
#define RX_BD_LEN(x)	(((x) & 0xffff) << 16)

#define RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define ETHOC_BUFSIZ		1536
#define ETHOC_ZLEN		64
#define ETHOC_BD_BASE		0x400
#define ETHOC_TIMEOUT		(HZ / 2)
#define ETHOC_MII_TIMEOUT	(1 + (HZ / 5))
#define ETHOC_IOSIZE		0x54

/**
 * struct ethoc - driver-private device structure
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 */
struct ethoc {
	u32 num_tx;
	u32 cur_tx;
	u32 dty_tx;
	u32 num_rx;
	u32 cur_rx;
	void __iomem *iobase;
	void __iomem *packet;
	phys_addr_t packet_phys;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:	buffer statistics
 * @addr:	physical memory address
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};

static inline u32 ethoc_read(struct ethoc *priv, size_t offset)
{
	return readl(priv->iobase + offset);
}

static inline void ethoc_write(struct ethoc *priv, size_t offset, u32 data)
{
	writel(data, priv->iobase + offset);
}

static inline void ethoc_read_bd(struct ethoc *priv, int index,
				 struct ethoc_bd *bd)
{
	size_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	bd->stat = ethoc_read(priv, offset + 0);
	bd->addr = ethoc_read(priv, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *priv, int index,
				  const struct ethoc_bd *bd)
{
	size_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(priv, offset + 0, bd->stat);
	ethoc_write(priv, offset + 4, bd->addr);
}
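
/*
 * Note on descriptor layout (added commentary, not from the original source):
 * the buffer descriptors do not live in system RAM but in the MAC's own
 * address space, starting at ETHOC_BD_BASE (0x400).  Each descriptor is two
 * 32-bit words, a status/length word followed by the physical address of the
 * data buffer.  TX descriptors occupy slots 0..num_tx-1 and RX descriptors
 * follow at slots num_tx..num_tx+num_rx-1, which is why the receive paths
 * below index descriptors as (priv->num_tx + i).
 */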

static int ethoc_write_hwaddr_common(struct ethoc *priv, u8 *mac)
{
	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
		    (mac[4] << 8) | (mac[5] << 0));
	ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
	return 0;
}

static inline void ethoc_ack_irq(struct ethoc *priv, u32 mask)
{
	ethoc_write(priv, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *priv)
{
	u32 mode = ethoc_read(priv, MODER);
	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(priv, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *priv)
{
	u32 mode = ethoc_read(priv, MODER);
	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(priv, MODER, mode);
}

static int ethoc_init_ring(struct ethoc *priv)
{
	struct ethoc_bd bd;
	phys_addr_t addr = priv->packet_phys;
	int i;

	priv->cur_tx = 0;
	priv->dty_tx = 0;
	priv->cur_rx = 0;

	/* setup transmission buffers */
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	bd.addr = 0;

	for (i = 0; i < priv->num_tx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		}
		if (i == priv->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(priv, i, &bd);
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < priv->num_rx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		} else {
			bd.addr = virt_to_phys(net_rx_packets[i]);
		}
		if (i == priv->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		flush_dcache_range((ulong)net_rx_packets[i],
				   (ulong)net_rx_packets[i] + PKTSIZE_ALIGN);
		ethoc_write_bd(priv, priv->num_tx + i, &bd);
	}

	return 0;
}

static int ethoc_reset(struct ethoc *priv)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(priv);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(priv, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(priv, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(priv, MODER);
	mode |= MODER_FULLD;
	ethoc_write(priv, MODER, mode);
	ethoc_write(priv, IPGT, 0x15);

	ethoc_ack_irq(priv, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(priv);
	return 0;
}

static int ethoc_init_common(struct ethoc *priv)
{
	priv->num_tx = 1;
	priv->num_rx = PKTBUFSRX;
	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
	ethoc_init_ring(priv);
	ethoc_reset(priv);

	return 0;
}
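
/*
 * Added commentary (not from the original source): the driver runs the MAC
 * in polled mode.  INT_MASK is left at its reset default and never
 * programmed here, so the driver does not rely on interrupt delivery;
 * instead the send and receive paths below read INT_SOURCE directly and
 * acknowledge the bits they have consumed via ethoc_ack_irq().
 */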

static int ethoc_update_rx_stats(struct ethoc_bd *bd)
{
	int ret = 0;

	if (bd->stat & RX_BD_TL) {
		debug("ETHOC: " "RX: frame too long\n");
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		debug("ETHOC: " "RX: frame too short\n");
		ret++;
	}

	if (bd->stat & RX_BD_DN)
		debug("ETHOC: " "RX: dribble nibble\n");

	if (bd->stat & RX_BD_CRC) {
		debug("ETHOC: " "RX: wrong CRC\n");
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		debug("ETHOC: " "RX: overrun\n");
		ret++;
	}

	if (bd->stat & RX_BD_LC) {
		debug("ETHOC: " "RX: late collision\n");
		ret++;
	}

	return ret;
}

static int ethoc_rx_common(struct ethoc *priv, uchar **packetp)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;

	ethoc_read_bd(priv, entry, &bd);
	if (bd.stat & RX_BD_EMPTY)
		return -EAGAIN;

	debug("%s(): RX buffer %d, %x received\n",
	      __func__, priv->cur_rx, bd.stat);
	if (ethoc_update_rx_stats(&bd) == 0) {
		int size = bd.stat >> 16;

		size -= 4;	/* strip the CRC */
		if (priv->packet)
			*packetp = priv->packet + entry * PKTSIZE_ALIGN;
		else
			*packetp = net_rx_packets[i];
		return size;
	} else {
		return 0;
	}
}

static int ethoc_is_new_packet_received(struct ethoc *priv)
{
	u32 pending;

	pending = ethoc_read(priv, INT_SOURCE);
	ethoc_ack_irq(priv, pending);
	if (pending & INT_MASK_BUSY)
		debug("%s(): packet dropped\n", __func__);
	if (pending & INT_MASK_RX) {
		debug("%s(): rx irq\n", __func__);
		return 1;
	}

	return 0;
}

static int ethoc_update_tx_stats(struct ethoc_bd *bd)
{
	if (bd->stat & TX_BD_LC)
		debug("ETHOC: " "TX: late collision\n");

	if (bd->stat & TX_BD_RL)
		debug("ETHOC: " "TX: retransmit limit\n");

	if (bd->stat & TX_BD_UR)
		debug("ETHOC: " "TX: underrun\n");

	if (bd->stat & TX_BD_CS)
		debug("ETHOC: " "TX: carrier sense lost\n");

	return 0;
}

static void ethoc_tx(struct ethoc *priv)
{
	u32 entry = priv->dty_tx % priv->num_tx;
	struct ethoc_bd bd;

	ethoc_read_bd(priv, entry, &bd);
	if ((bd.stat & TX_BD_READY) == 0)
		(void)ethoc_update_tx_stats(&bd);
}

static int ethoc_send_common(struct ethoc *priv, void *packet, int length)
{
	struct ethoc_bd bd;
	u32 entry;
	u32 pending;
	int tmo;

	entry = priv->cur_tx % priv->num_tx;
	ethoc_read_bd(priv, entry, &bd);
	if (unlikely(length < ETHOC_ZLEN))
		bd.stat |= TX_BD_PAD;
	else
		bd.stat &= ~TX_BD_PAD;

	if (priv->packet) {
		void *p = priv->packet + entry * PKTSIZE_ALIGN;

		memcpy(p, packet, length);
		packet = p;
	} else {
		bd.addr = virt_to_phys(packet);
	}
	flush_dcache_range((ulong)packet, (ulong)packet + length);
	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
	bd.stat |= TX_BD_LEN(length);
	ethoc_write_bd(priv, entry, &bd);

	/* start transmit */
	bd.stat |= TX_BD_READY;
	ethoc_write_bd(priv, entry, &bd);

	/* wait for transfer to succeed */
	tmo = get_timer(0) + 5 * CONFIG_SYS_HZ;
	while (1) {
		pending = ethoc_read(priv, INT_SOURCE);
		ethoc_ack_irq(priv, pending & ~INT_MASK_RX);
		if (pending & INT_MASK_BUSY)
			debug("%s(): packet dropped\n", __func__);

		if (pending & INT_MASK_TX) {
			ethoc_tx(priv);
			break;
		}
		if (get_timer(0) >= tmo) {
			debug("%s(): timed out\n", __func__);
			return -1;
		}
	}

	debug("%s(): packet sent\n", __func__);
	return 0;
}

static int ethoc_free_pkt_common(struct ethoc *priv)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;
	void *src;

	ethoc_read_bd(priv, entry, &bd);

	if (priv->packet)
		src = priv->packet + entry * PKTSIZE_ALIGN;
	else
		src = net_rx_packets[i];
	/* clear the buffer descriptor so it can be reused */
	flush_dcache_range((ulong)src,
			   (ulong)src + PKTSIZE_ALIGN);
	bd.stat &= ~RX_BD_STATS;
	bd.stat |= RX_BD_EMPTY;
	ethoc_write_bd(priv, entry, &bd);
	priv->cur_rx++;

	return 0;
}

#ifdef CONFIG_DM_ETH

static int ethoc_write_hwaddr(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	struct ethoc *priv = dev_get_priv(dev);
	u8 *mac = pdata->eth_pdata.enetaddr;

	return ethoc_write_hwaddr_common(priv, mac);
}

static int ethoc_send(struct udevice *dev, void *packet, int length)
{
	return ethoc_send_common(dev_get_priv(dev), packet, length);
}

static int ethoc_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	return ethoc_free_pkt_common(dev_get_priv(dev));
}

static int ethoc_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ethoc *priv = dev_get_priv(dev);

	if (flags & ETH_RECV_CHECK_DEVICE)
		if (!ethoc_is_new_packet_received(priv))
			return -EAGAIN;

	return ethoc_rx_common(priv, packetp);
}

static int ethoc_start(struct udevice *dev)
{
	return ethoc_init_common(dev_get_priv(dev));
}

static void ethoc_stop(struct udevice *dev)
{
	struct ethoc *priv = dev_get_priv(dev);

	ethoc_disable_rx_and_tx(priv);
}

static int ethoc_ofdata_to_platdata(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	fdt_addr_t addr;

	pdata->eth_pdata.iobase = dev_get_addr(dev);
	addr = dev_get_addr_index(dev, 1);
	if (addr != FDT_ADDR_T_NONE)
		pdata->packet_base = addr;
	return 0;
}

static int ethoc_probe(struct udevice *dev)
{
	struct ethoc_eth_pdata *pdata = dev_get_platdata(dev);
	struct ethoc *priv = dev_get_priv(dev);

	priv->iobase = ioremap(pdata->eth_pdata.iobase, ETHOC_IOSIZE);
	if (pdata->packet_base) {
		priv->packet_phys = pdata->packet_base;
		priv->packet = ioremap(pdata->packet_base,
				       (1 + PKTBUFSRX) * PKTSIZE_ALIGN);
	}
	return 0;
}

static int ethoc_remove(struct udevice *dev)
{
	struct ethoc *priv = dev_get_priv(dev);

	iounmap(priv->iobase);
	return 0;
}

static const struct eth_ops ethoc_ops = {
	.start		= ethoc_start,
	.stop		= ethoc_stop,
	.send		= ethoc_send,
	.recv		= ethoc_recv,
	.free_pkt	= ethoc_free_pkt,
	.write_hwaddr	= ethoc_write_hwaddr,
};

static const struct udevice_id ethoc_ids[] = {
	{ .compatible = "opencores,ethoc" },
	{ }
};

U_BOOT_DRIVER(ethoc) = {
	.name				= "ethoc",
	.id				= UCLASS_ETH,
	.of_match			= ethoc_ids,
	.ofdata_to_platdata		= ethoc_ofdata_to_platdata,
	.probe				= ethoc_probe,
	.remove				= ethoc_remove,
	.ops				= &ethoc_ops,
	.priv_auto_alloc_size		= sizeof(struct ethoc),
	.platdata_auto_alloc_size	= sizeof(struct ethoc_eth_pdata),
};
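
/*
 * Illustrative device tree node (added for documentation, not taken from
 * this file): the first "reg" entry maps the register block (ETHOC_IOSIZE
 * bytes), and an optional second entry points at dedicated packet buffer
 * memory, which ethoc_ofdata_to_platdata() picks up via
 * dev_get_addr_index(dev, 1).  The addresses below are hypothetical:
 *
 *	ethernet@92000000 {
 *		compatible = "opencores,ethoc";
 *		reg = <0x92000000 0x54>, <0x92400000 0x2000>;
 *	};
 */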

#else

static int ethoc_init(struct eth_device *dev, bd_t *bd)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;

	return ethoc_init_common(priv);
}

static int ethoc_write_hwaddr(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	u8 *mac = dev->enetaddr;

	return ethoc_write_hwaddr_common(priv, mac);
}

static int ethoc_send(struct eth_device *dev, void *packet, int length)
{
	return ethoc_send_common(dev->priv, packet, length);
}

static void ethoc_halt(struct eth_device *dev)
{
	ethoc_disable_rx_and_tx(dev->priv);
}

static int ethoc_recv(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;

	if (!ethoc_is_new_packet_received(priv))
		return 0;

	for (count = 0; count < PKTBUFSRX; ++count) {
		uchar *packetp;
		int size = ethoc_rx_common(priv, &packetp);

		if (size < 0)
			break;
		if (size > 0)
			net_process_received_packet(packetp, size);
		ethoc_free_pkt_common(priv);
	}
	return 0;
}

int ethoc_initialize(u8 dev_num, int base_addr)
{
	struct ethoc *priv;
	struct eth_device *dev;

	priv = malloc(sizeof(*priv));
	if (!priv)
		return 0;
	dev = malloc(sizeof(*dev));
	if (!dev) {
		free(priv);
		return 0;
	}

	memset(dev, 0, sizeof(*dev));
	dev->priv = priv;
	dev->iobase = base_addr;
	dev->init = ethoc_init;
	dev->halt = ethoc_halt;
	dev->send = ethoc_send;
	dev->recv = ethoc_recv;
	dev->write_hwaddr = ethoc_write_hwaddr;
	sprintf(dev->name, "%s-%hu", "ETHOC", dev_num);
	priv->iobase = ioremap(dev->iobase, ETHOC_IOSIZE);

	eth_register(dev);
	return 1;
}

#endif
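
/*
 * Non-DM usage sketch (added commentary, not from the original source): a
 * board that does not use driver model would typically register the MAC from
 * its board_eth_init() hook, e.g.
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return ethoc_initialize(0, CONFIG_SYS_ETHOC_BASE);
 *	}
 *
 * where CONFIG_SYS_ETHOC_BASE is a hypothetical board-specific register base
 * address.
 */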