/*
 * QEMU model of Xilinx AXI-Ethernet.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/checksum.h"

#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/stream.h"

#define DPHY(x)

#define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet"
#define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream"
#define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream"

#define XILINX_AXI_ENET(obj) \
     OBJECT_CHECK(XilinxAXIEnet, (obj), TYPE_XILINX_AXI_ENET)

#define XILINX_AXI_ENET_DATA_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
     TYPE_XILINX_AXI_ENET_DATA_STREAM)

#define XILINX_AXI_ENET_CONTROL_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
     TYPE_XILINX_AXI_ENET_CONTROL_STREAM)

/* Advertisement control register. */
#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

struct PHY {
    uint32_t regs[32];

    int link;

    unsigned int (*read)(struct PHY *phy, unsigned int req);
    void (*write)(struct PHY *phy, unsigned int req,
                  unsigned int data);
};

static unsigned int tdk_read(struct PHY *phy, unsigned int req)
{
    int regnum;
    unsigned r = 0;

    regnum = req & 0x1f;

    switch (regnum) {
    case 1:
        if (!phy->link) {
            break;
        }
        /* MR1. */
        /* Speeds and modes. */
        r |= (1 << 13) | (1 << 14);
        r |= (1 << 11) | (1 << 12);
        r |= (1 << 5); /* Autoneg complete. */
        r |= (1 << 3); /* Autoneg able. */
        r |= (1 << 2); /* link. */
        r |= (1 << 1); /* link. */
        break;
    case 5:
        /* Link partner ability.
           We are kind; always agree with whatever best mode
           the guest advertises. */
        r = 1 << 14; /* Success. */
        /* Copy advertised modes. */
        r |= phy->regs[4] & (15 << 5);
        /* Autoneg support. */
        r |= 1;
        break;
    case 17:
        /* Marvell PHY on many xilinx boards. */
        r = 0x8000; /* 1000Mb */
        break;
    case 18:
    {
        /* Diagnostics reg. */
        int duplex = 0;
        int speed_100 = 0;

        if (!phy->link) {
            break;
        }

        /* Are we advertising 100 half or 100 duplex ? */
        speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
        speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);

        /* Are we advertising 10 duplex or 100 duplex ? */
        duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
        duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
        r = (speed_100 << 10) | (duplex << 11);
    }
    break;

    default:
        r = phy->regs[regnum];
        break;
    }
    DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
    return r;
}

static void
tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
{
    int regnum;

    regnum = req & 0x1f;
    DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
    switch (regnum) {
    default:
        phy->regs[regnum] = data;
        break;
    }

    /* Unconditionally clear regs[BMCR][BMCR_RESET] and auto-neg */
    phy->regs[0] &= ~0x8200;
}

static void
tdk_init(struct PHY *phy)
{
    phy->regs[0] = 0x3100;
    /* PHY Id. */
    phy->regs[2] = 0x0300;
    phy->regs[3] = 0xe400;
    /* Autonegotiation advertisement reg. */
    phy->regs[4] = 0x01E1;
    phy->link = 1;

    phy->read = tdk_read;
    phy->write = tdk_write;
}
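
/*
 * MDIO bus model.  Accesses are modelled at the transaction level:
 * mdio_read_req()/mdio_write_req() look the PHY up by its 5-bit address in
 * devs[] and call its read/write hooks directly.  The bit-level decoder
 * state below (mdc, mdio, state, ...) is kept but not referenced elsewhere
 * in this model.
 */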
struct MDIOBus {
    /* bus. */
    int mdc;
    int mdio;

    /* decoder. */
    enum {
        PREAMBLE,
        SOF,
        OPC,
        ADDR,
        REQ,
        TURNAROUND,
        DATA
    } state;
    unsigned int drive;

    unsigned int cnt;
    unsigned int addr;
    unsigned int opc;
    unsigned int req;
    unsigned int data;

    struct PHY *devs[32];
};

static void
mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = phy;
}

#ifdef USE_THIS_DEAD_CODE
static void
mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = NULL;
}
#endif

static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
                              unsigned int reg)
{
    struct PHY *phy;
    uint16_t data;

    phy = bus->devs[addr];
    if (phy && phy->read) {
        data = phy->read(phy, reg);
    } else {
        data = 0xffff;
    }
    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    return data;
}

static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
                           unsigned int reg, uint16_t data)
{
    struct PHY *phy;

    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    phy = bus->devs[addr];
    if (phy && phy->write) {
        phy->write(phy, reg, data);
    }
}

#define DENET(x)
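
/*
 * Register map (byte offsets; the R_* defines below are word indices):
 *   0x000 - 0x030:  core control/status (RAF, IS/IP/IE, UAWL/UAWU, PPST)
 *   0x200 - 0x2ac:  statistics counters, each a 64-bit low/high pair
 *   0x400 - 0x414:  RCW0/RCW1, TC, EMMC, PHYC
 *   0x500 - 0x50c:  MDIO access (MC, MCR, MWD, MRD)
 *   0x700 - 0x714:  unicast address words, FMI and the indexed AF0/AF1 pair
 *   0x20000 and up: extended multicast lookup table (ext_mtable; these appear
 *                   as word offsets 0x8000 - 0x83ff in enet_read/enet_write)
 */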
#define R_RAF      (0x000 / 4)
enum {
    RAF_MCAST_REJ = (1 << 1),
    RAF_BCAST_REJ = (1 << 2),
    RAF_EMCF_EN = (1 << 12),
    RAF_NEWFUNC_EN = (1 << 11)
};

#define R_IS       (0x00C / 4)
enum {
    IS_HARD_ACCESS_COMPLETE = 1,
    IS_AUTONEG = (1 << 1),
    IS_RX_COMPLETE = (1 << 2),
    IS_RX_REJECT = (1 << 3),
    IS_TX_COMPLETE = (1 << 5),
    IS_RX_DCM_LOCK = (1 << 6),
    IS_MGM_RDY = (1 << 7),
    IS_PHY_RST_DONE = (1 << 8),
};

#define R_IP       (0x010 / 4)
#define R_IE       (0x014 / 4)
#define R_UAWL     (0x020 / 4)
#define R_UAWU     (0x024 / 4)
#define R_PPST     (0x030 / 4)
enum {
    PPST_LINKSTATUS = (1 << 0),
    PPST_PHY_LINKSTATUS = (1 << 7),
};

#define R_STATS_RX_BYTESL   (0x200 / 4)
#define R_STATS_RX_BYTESH   (0x204 / 4)
#define R_STATS_TX_BYTESL   (0x208 / 4)
#define R_STATS_TX_BYTESH   (0x20C / 4)
#define R_STATS_RXL         (0x290 / 4)
#define R_STATS_RXH         (0x294 / 4)
#define R_STATS_RX_BCASTL   (0x2a0 / 4)
#define R_STATS_RX_BCASTH   (0x2a4 / 4)
#define R_STATS_RX_MCASTL   (0x2a8 / 4)
#define R_STATS_RX_MCASTH   (0x2ac / 4)

#define R_RCW0      (0x400 / 4)
#define R_RCW1      (0x404 / 4)
enum {
    RCW1_VLAN = (1 << 27),
    RCW1_RX = (1 << 28),
    RCW1_FCS = (1 << 29),
    RCW1_JUM = (1 << 30),
    RCW1_RST = (1 << 31),
};

#define R_TC        (0x408 / 4)
enum {
    TC_VLAN = (1 << 27),
    TC_TX = (1 << 28),
    TC_FCS = (1 << 29),
    TC_JUM = (1 << 30),
    TC_RST = (1 << 31),
};

#define R_EMMC      (0x410 / 4)
enum {
    EMMC_LINKSPEED_10MB = (0 << 30),
    EMMC_LINKSPEED_100MB = (1 << 30),
    EMMC_LINKSPEED_1000MB = (2 << 30),
};

#define R_PHYC      (0x414 / 4)

#define R_MC        (0x500 / 4)
#define MC_EN       (1 << 6)

#define R_MCR       (0x504 / 4)
#define R_MWD       (0x508 / 4)
#define R_MRD       (0x50c / 4)
#define R_MIS       (0x600 / 4)
#define R_MIP       (0x620 / 4)
#define R_MIE       (0x640 / 4)
#define R_MIC       (0x640 / 4)

#define R_UAW0      (0x700 / 4)
#define R_UAW1      (0x704 / 4)
#define R_FMI       (0x708 / 4)
#define R_AF0       (0x710 / 4)
#define R_AF1       (0x714 / 4)
#define R_MAX       (0x34 / 4)

/* Indirect registers. */
struct TEMAC {
    struct MDIOBus mdio_bus;
    struct PHY phy;

    void *parent;
};

typedef struct XilinxAXIEnetStreamSlave XilinxAXIEnetStreamSlave;
typedef struct XilinxAXIEnet XilinxAXIEnet;

struct XilinxAXIEnetStreamSlave {
    Object parent;

    struct XilinxAXIEnet *enet;
};

struct XilinxAXIEnet {
    SysBusDevice busdev;
    MemoryRegion iomem;
    qemu_irq irq;
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    XilinxAXIEnetStreamSlave rx_data_dev;
    XilinxAXIEnetStreamSlave rx_control_dev;
    NICState *nic;
    NICConf conf;

    uint32_t c_rxmem;
    uint32_t c_txmem;
    uint32_t c_phyaddr;

    struct TEMAC TEMAC;

    /* MII regs. */
    union {
        uint32_t regs[4];
        struct {
            uint32_t mc;
            uint32_t mcr;
            uint32_t mwd;
            uint32_t mrd;
        };
    } mii;

    struct {
        uint64_t rx_bytes;
        uint64_t tx_bytes;

        uint64_t rx;
        uint64_t rx_bcast;
        uint64_t rx_mcast;
    } stats;

    /* Receive configuration words. */
    uint32_t rcw[2];
    /* Transmit config. */
    uint32_t tc;
    uint32_t emmc;
    uint32_t phyc;

    /* Unicast Address Word. */
    uint32_t uaw[2];
    /* Unicast address filter used with extended mcast. */
    uint32_t ext_uaw[2];
    uint32_t fmi;

    uint32_t regs[R_MAX];

    /* Multicast filter addrs. */
    uint32_t maddr[4][2];
    /* 32K x 1 lookup filter. */
    uint32_t ext_mtable[1024];

    uint32_t hdr[CONTROL_PAYLOAD_WORDS];

    uint8_t *txmem;
    uint32_t txpos;

    uint8_t *rxmem;
    uint32_t rxsize;
    uint32_t rxpos;

    uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
    uint32_t rxappsize;

    /* Whether axienet_eth_rx_notify should flush incoming queue. */
    bool need_flush;
};
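
/*
 * Data flow:
 *
 * RX: eth_rx() filters the incoming frame, copies it into rxmem and builds
 * the five status words in rxapp.  axienet_eth_rx_notify() then pushes rxapp
 * to tx_control_dev and the frame itself to tx_data_dev (normally the
 * attached AXI DMA model, but any StreamSlave wired up through the
 * "axistream-connected*" link properties).
 *
 * TX: the DMA pushes control words into xilinx_axienet_control_stream_push()
 * (stored in hdr[]) and frame fragments into xilinx_axienet_data_stream_push(),
 * which hands the assembled packet to qemu_send_packet().
 */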
static void axienet_rx_reset(XilinxAXIEnet *s)
{
    s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
}

static void axienet_tx_reset(XilinxAXIEnet *s)
{
    s->tc = TC_JUM | TC_TX | TC_VLAN;
    s->txpos = 0;
}

static inline int axienet_rx_resetting(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RST;
}

static inline int axienet_rx_enabled(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RX;
}

static inline int axienet_extmcf_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_EMCF_EN);
}

static inline int axienet_newfunc_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
}

static void xilinx_axienet_reset(DeviceState *d)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(d);

    axienet_rx_reset(s);
    axienet_tx_reset(s);

    s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
    s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;

    s->emmc = EMMC_LINKSPEED_100MB;
}

static void enet_update_irq(XilinxAXIEnet *s)
{
    s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
    qemu_set_irq(s->irq, !!s->regs[R_IP]);
}

static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    uint32_t r = 0;
    addr >>= 2;

    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        r = s->rcw[addr & 1];
        break;

    case R_TC:
        r = s->tc;
        break;

    case R_EMMC:
        r = s->emmc;
        break;

    case R_PHYC:
        r = s->phyc;
        break;

    case R_MCR:
        r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
        break;

    case R_STATS_RX_BYTESL:
    case R_STATS_RX_BYTESH:
        r = s->stats.rx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_TX_BYTESL:
    case R_STATS_TX_BYTESH:
        r = s->stats.tx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_RXL:
    case R_STATS_RXH:
        r = s->stats.rx >> (32 * (addr & 1));
        break;
    case R_STATS_RX_BCASTL:
    case R_STATS_RX_BCASTH:
        r = s->stats.rx_bcast >> (32 * (addr & 1));
        break;
    case R_STATS_RX_MCASTL:
    case R_STATS_RX_MCASTH:
        r = s->stats.rx_mcast >> (32 * (addr & 1));
        break;

    case R_MC:
    case R_MWD:
    case R_MRD:
        r = s->mii.regs[addr & 3];
        break;

    case R_UAW0:
    case R_UAW1:
        r = s->uaw[addr & 1];
        break;

    case R_UAWU:
    case R_UAWL:
        r = s->ext_uaw[addr & 1];
        break;

    case R_FMI:
        r = s->fmi;
        break;

    case R_AF0:
    case R_AF1:
        r = s->maddr[s->fmi & 3][addr & 1];
        break;

    case 0x8000 ... 0x83ff:
        r = s->ext_mtable[addr - 0x8000];
        break;

    default:
        if (addr < ARRAY_SIZE(s->regs)) {
            r = s->regs[addr];
        }
        DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, addr * 4, r));
        break;
    }
    return r;
}
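
/*
 * Register writes.  A write to R_MCR triggers an immediate MDIO transaction:
 * bits [28:24] select the PHY address, [20:16] the PHY register, [15:14] the
 * operation (1 = write from MWD, 2 = read into MRD) and bit 11 initiates the
 * access.  Because the access completes instantly, reads of R_MCR always
 * report the ready bit set.
 */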
static void enet_write(void *opaque, hwaddr addr,
                       uint64_t value, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    struct TEMAC *t = &s->TEMAC;

    addr >>= 2;
    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        s->rcw[addr & 1] = value;
        if ((addr & 1) && value & RCW1_RST) {
            axienet_rx_reset(s);
        } else {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;

    case R_TC:
        s->tc = value;
        if (value & TC_RST) {
            axienet_tx_reset(s);
        }
        break;

    case R_EMMC:
        s->emmc = value;
        break;

    case R_PHYC:
        s->phyc = value;
        break;

    case R_MC:
        value &= ((1 << 7) - 1);

        /* Enable the MII. */
        if (value & MC_EN) {
            unsigned int miiclkdiv = value & ((1 << 6) - 1);
            if (!miiclkdiv) {
                qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
            }
        }
        s->mii.mc = value;
        break;

    case R_MCR: {
        unsigned int phyaddr = (value >> 24) & 0x1f;
        unsigned int regaddr = (value >> 16) & 0x1f;
        unsigned int op = (value >> 14) & 3;
        unsigned int initiate = (value >> 11) & 1;

        if (initiate) {
            if (op == 1) {
                mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
            } else if (op == 2) {
                s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
            } else {
                qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
            }
        }
        s->mii.mcr = value;
        break;
    }

    case R_MWD:
    case R_MRD:
        s->mii.regs[addr & 3] = value;
        break;

    case R_UAW0:
    case R_UAW1:
        s->uaw[addr & 1] = value;
        break;

    case R_UAWL:
    case R_UAWU:
        s->ext_uaw[addr & 1] = value;
        break;

    case R_FMI:
        s->fmi = value;
        break;

    case R_AF0:
    case R_AF1:
        s->maddr[s->fmi & 3][addr & 1] = value;
        break;

    case R_IS:
        s->regs[addr] &= ~value;
        break;

    case 0x8000 ... 0x83ff:
        s->ext_mtable[addr - 0x8000] = value;
        break;

    default:
        DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, addr * 4, (unsigned)value));
        if (addr < ARRAY_SIZE(s->regs)) {
            s->regs[addr] = value;
        }
        break;
    }
    enet_update_irq(s);
}

static const MemoryRegionOps enet_ops = {
    .read = enet_read,
    .write = enet_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
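
/*
 * RX flow control: while a previously received frame is still being drained
 * into the DMA streams (rxsize != 0), eth_can_rx() returns false.  eth_rx()
 * then sets need_flush and returns 0 so the net layer queues further packets;
 * axienet_eth_rx_notify() flushes that queue once the frame has been pushed
 * out completely.
 */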
static int eth_can_rx(XilinxAXIEnet *s)
{
    /* RX enabled? */
    return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
}

static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
{
    int match = 1;

    if (memcmp(buf, &f0, 4)) {
        match = 0;
    }

    if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
        match = 0;
    }

    return match;
}

static void axienet_eth_rx_notify(void *opaque)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);

    while (s->rxappsize && stream_can_push(s->tx_control_dev,
                                           axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_control_dev,
                                 (void *)s->rxapp + CONTROL_PAYLOAD_SIZE
                                 - s->rxappsize, s->rxappsize, true);
        s->rxappsize -= ret;
    }

    while (s->rxsize && stream_can_push(s->tx_data_dev,
                                        axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
                                 s->rxsize, true);
        s->rxsize -= ret;
        s->rxpos += ret;
        if (!s->rxsize) {
            s->regs[R_IS] |= IS_RX_COMPLETE;
            if (s->need_flush) {
                s->need_flush = false;
                qemu_flush_queued_packets(qemu_get_queue(s->nic));
            }
        }
    }
    enet_update_irq(s);
}

static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
    static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
                                              0xff, 0xff, 0xff};
    static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
    uint32_t app[CONTROL_PAYLOAD_WORDS] = {0};
    int promisc = s->fmi & (1 << 31);
    int unicast, broadcast, multicast, ip_multicast = 0;
    uint32_t csum32;
    uint16_t csum16;
    int i;

    DENET(qemu_log("%s: %zd bytes\n", __func__, size));

    if (!eth_can_rx(s)) {
        s->need_flush = true;
        return 0;
    }

    unicast = ~buf[0] & 0x1;
    broadcast = memcmp(buf, sa_bcast, 6) == 0;
    multicast = !unicast && !broadcast;
    if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
        ip_multicast = 1;
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->rcw[1] & RCW1_JUM)) {
        if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
            return size;
        }
    }

    /* Basic Address filters. If you want to use the extended filters
       you'll generally have to place the ethernet mac into promiscuous mode
       to avoid the basic filtering from dropping most frames. */
    if (!promisc) {
        if (unicast) {
            if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int drop = 1;

                /* Multicast. */
                if (s->regs[R_RAF] & RAF_MCAST_REJ) {
                    return size;
                }

                for (i = 0; i < 4; i++) {
                    if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
                        drop = 0;
                        break;
                    }
                }

                if (drop) {
                    return size;
                }
            }
        }
    }

    /* Extended mcast filtering enabled? */
    if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
        if (unicast) {
            if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. ??? */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int idx, bit;

                /* Multicast. */
                if (!memcmp(buf, sa_ipmcast, 3)) {
                    return size;
                }

                idx = (buf[4] & 0x7f) << 8;
                idx |= buf[5];

                bit = 1 << (idx & 0x1f);
                idx >>= 5;

                if (!(s->ext_mtable[idx] & bit)) {
                    return size;
                }
            }
        }
    }

    if (size < 12) {
        s->regs[R_IS] |= IS_RX_REJECT;
        enet_update_irq(s);
        return -1;
    }

    if (size > (s->c_rxmem - 4)) {
        size = s->c_rxmem - 4;
    }

    memcpy(s->rxmem, buf, size);
    memset(s->rxmem + size, 0, 4); /* Clear the FCS. */

    if (s->rcw[1] & RCW1_FCS) {
        size += 4; /* fcs is inband. */
    }
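
    /* Build the five receive status (app) words that axienet_eth_rx_notify()
       pushes on the control stream ahead of the frame: app[2] carries the
       address-match flags (bit 0 multicast, bit 1 IP multicast, bit 3
       broadcast, bit 6 good frame), app[3] the folded 16-bit checksum of
       everything after the 14-byte Ethernet header and app[4] the length.  */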
    app[0] = 5 << 28;
    csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
    /* Fold it once. */
    csum32 = (csum32 & 0xffff) + (csum32 >> 16);
    /* And twice to get rid of possible carries. */
    csum16 = (csum32 & 0xffff) + (csum32 >> 16);
    app[3] = csum16;
    app[4] = size & 0xffff;

    s->stats.rx_bytes += size;
    s->stats.rx++;
    if (multicast) {
        s->stats.rx_mcast++;
        app[2] |= 1 | (ip_multicast << 1);
    } else if (broadcast) {
        s->stats.rx_bcast++;
        app[2] |= 1 << 3;
    }

    /* Good frame. */
    app[2] |= 1 << 6;

    s->rxsize = size;
    s->rxpos = 0;
    for (i = 0; i < ARRAY_SIZE(app); ++i) {
        app[i] = cpu_to_le32(app[i]);
    }
    s->rxappsize = CONTROL_PAYLOAD_SIZE;
    memcpy(s->rxapp, app, s->rxappsize);
    axienet_eth_rx_notify(s);

    enet_update_irq(s);
    return size;
}

static size_t
xilinx_axienet_control_stream_push(StreamSlave *obj, uint8_t *buf, size_t len,
                                   bool eop)
{
    int i;
    XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj);
    XilinxAXIEnet *s = cs->enet;

    assert(eop);
    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI Enet requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->hdr, buf, len);

    for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) {
        s->hdr[i] = le32_to_cpu(s->hdr[i]);
    }
    return len;
}

static size_t
xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size,
                                bool eop)
{
    XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(obj);
    XilinxAXIEnet *s = ds->enet;

    /* TX enable ? */
    if (!(s->tc & TC_TX)) {
        return size;
    }

    if (s->txpos + size > s->c_txmem) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Packet larger than txmem\n",
                      TYPE_XILINX_AXI_ENET);
        s->txpos = 0;
        return size;
    }

    if (s->txpos == 0 && eop) {
        /* Fast path single fragment. */
        s->txpos = size;
    } else {
        memcpy(s->txmem + s->txpos, buf, size);
        buf = s->txmem;
        s->txpos += size;

        if (!eop) {
            return size;
        }
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->tc & TC_JUM)) {
        if (s->txpos > 1518 && s->txpos <= 1522 && !(s->tc & TC_VLAN)) {
            s->txpos = 0;
            return size;
        }
    }
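
    /* Partial checksum offload: when bit 0 of the first control word is set,
       hdr[1] carries the checksum start offset (upper 16 bits) and the insert
       offset (lower 16 bits), and hdr[2] carries the initial seed.  The folded
       result is written back big-endian at the insert offset.  */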
    if (s->hdr[0] & 1) {
        unsigned int start_off = s->hdr[1] >> 16;
        unsigned int write_off = s->hdr[1] & 0xffff;
        uint32_t tmp_csum;
        uint16_t csum;

        tmp_csum = net_checksum_add(s->txpos - start_off,
                                    buf + start_off);
        /* Accumulate the seed. */
        tmp_csum += s->hdr[2] & 0xffff;

        /* Fold the 32bit partial checksum. */
        csum = net_checksum_finish(tmp_csum);

        /* Writeback. */
        buf[write_off] = csum >> 8;
        buf[write_off + 1] = csum & 0xff;
    }

    qemu_send_packet(qemu_get_queue(s->nic), buf, s->txpos);

    s->stats.tx_bytes += s->txpos;
    s->regs[R_IS] |= IS_TX_COMPLETE;
    enet_update_irq(s);

    s->txpos = 0;
    return size;
}

static NetClientInfo net_xilinx_enet_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = eth_rx,
};

static void xilinx_enet_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
    XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    Error *local_err = NULL;

    object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
                             (Object **) &ds->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
    object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
                             (Object **) &cs->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
    object_property_set_link(OBJECT(ds), OBJECT(s), "enet", &local_err);
    object_property_set_link(OBJECT(cs), OBJECT(s), "enet", &local_err);
    if (local_err) {
        goto xilinx_enet_realize_fail;
    }

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);

    tdk_init(&s->TEMAC.phy);
    mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);

    s->TEMAC.parent = s;

    s->rxmem = g_malloc(s->c_rxmem);
    s->txmem = g_malloc(s->c_txmem);
    return;

xilinx_enet_realize_fail:
    error_propagate(errp, local_err);
}

static void xilinx_enet_init(Object *obj)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(OBJECT(s), "axistream-connected-target",
                            &s->rx_data_dev, sizeof(s->rx_data_dev),
                            TYPE_XILINX_AXI_ENET_DATA_STREAM, &error_abort,
                            NULL);
    object_initialize_child(OBJECT(s), "axistream-control-connected-target",
                            &s->rx_control_dev, sizeof(s->rx_control_dev),
                            TYPE_XILINX_AXI_ENET_CONTROL_STREAM, &error_abort,
                            NULL);
    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
    sysbus_init_mmio(sbd, &s->iomem);
}
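
/*
 * Properties: "phyaddr" is the MDIO address the internal PHY model is
 * attached at (default 7), "rxmem"/"txmem" size the internal RX/TX packet
 * buffers, and the "axistream-connected"/"axistream-control-connected" links
 * point at the StreamSlave objects (typically the AXI DMA model) that receive
 * frame data and status words respectively.
 */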
static Property xilinx_enet_properties[] = {
    DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
    DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
    DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
    DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf),
    DEFINE_PROP_LINK("axistream-connected", XilinxAXIEnet,
                     tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet,
                     tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xilinx_enet_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_enet_realize;
    device_class_set_props(dc, xilinx_enet_properties);
    dc->reset = xilinx_axienet_reset;
}

static void xilinx_enet_control_stream_class_init(ObjectClass *klass,
                                                  void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = xilinx_axienet_control_stream_push;
}

static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = xilinx_axienet_data_stream_push;
}

static const TypeInfo xilinx_enet_info = {
    .name = TYPE_XILINX_AXI_ENET,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIEnet),
    .class_init = xilinx_enet_class_init,
    .instance_init = xilinx_enet_init,
};

static const TypeInfo xilinx_enet_data_stream_info = {
    .name = TYPE_XILINX_AXI_ENET_DATA_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
    .class_init = xilinx_enet_data_stream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static const TypeInfo xilinx_enet_control_stream_info = {
    .name = TYPE_XILINX_AXI_ENET_CONTROL_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
    .class_init = xilinx_enet_control_stream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_enet_register_types(void)
{
    type_register_static(&xilinx_enet_info);
    type_register_static(&xilinx_enet_data_stream_info);
    type_register_static(&xilinx_enet_control_stream_info);
}

type_init(xilinx_enet_register_types)
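
/*
 * Board wiring sketch (illustrative only; the base address, IRQ and the DMA
 * child-object names are assumptions here, see the Microblaze boards for the
 * real wiring).  The device is created, its stream links are pointed at the
 * DMA model's stream slaves, then it is mapped and connected:
 *
 *   DeviceState *dev = qdev_create(NULL, "xlnx.axi-ethernet");
 *   Object *ds = object_property_get_link(OBJECT(dma),
 *                                         "axistream-connected-target", NULL);
 *   Object *cs = object_property_get_link(OBJECT(dma),
 *                                 "axistream-control-connected-target", NULL);
 *
 *   qdev_set_nic_properties(dev, &nd_table[0]);
 *   object_property_set_link(OBJECT(dev), ds, "axistream-connected",
 *                            &error_abort);
 *   object_property_set_link(OBJECT(dev), cs, "axistream-control-connected",
 *                            &error_abort);
 *   qdev_init_nofail(dev);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_addr);
 *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
 */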