/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
23 */ 24 25 #include "qemu/osdep.h" 26 #include "hw/pci/pci.h" 27 #include "hw/qdev-properties.h" 28 #include "migration/vmstate.h" 29 #include "hw/net/mii.h" 30 #include "net/net.h" 31 #include "qemu/module.h" 32 #include "net/checksum.h" 33 #include "net/eth.h" 34 #include "sysemu/sysemu.h" 35 #include "trace.h" 36 #include "qom/object.h" 37 38 #define HME_REG_SIZE 0x8000 39 40 #define HME_SEB_REG_SIZE 0x2000 41 42 #define HME_SEBI_RESET 0x0 43 #define HME_SEB_RESET_ETX 0x1 44 #define HME_SEB_RESET_ERX 0x2 45 46 #define HME_SEBI_STAT 0x100 47 #define HME_SEBI_STAT_LINUXBUG 0x108 48 #define HME_SEB_STAT_RXTOHOST 0x10000 49 #define HME_SEB_STAT_NORXD 0x20000 50 #define HME_SEB_STAT_MIFIRQ 0x800000 51 #define HME_SEB_STAT_HOSTTOTX 0x1000000 52 #define HME_SEB_STAT_TXALL 0x2000000 53 54 #define HME_SEBI_IMASK 0x104 55 #define HME_SEBI_IMASK_LINUXBUG 0x10c 56 57 #define HME_ETX_REG_SIZE 0x2000 58 59 #define HME_ETXI_PENDING 0x0 60 61 #define HME_ETXI_RING 0x8 62 #define HME_ETXI_RING_ADDR 0xffffff00 63 #define HME_ETXI_RING_OFFSET 0xff 64 65 #define HME_ETXI_RSIZE 0x2c 66 67 #define HME_ERX_REG_SIZE 0x2000 68 69 #define HME_ERXI_CFG 0x0 70 #define HME_ERX_CFG_RINGSIZE 0x600 71 #define HME_ERX_CFG_RINGSIZE_SHIFT 9 72 #define HME_ERX_CFG_BYTEOFFSET 0x38 73 #define HME_ERX_CFG_BYTEOFFSET_SHIFT 3 74 #define HME_ERX_CFG_CSUMSTART 0x7f0000 75 #define HME_ERX_CFG_CSUMSHIFT 16 76 77 #define HME_ERXI_RING 0x4 78 #define HME_ERXI_RING_ADDR 0xffffff00 79 #define HME_ERXI_RING_OFFSET 0xff 80 81 #define HME_MAC_REG_SIZE 0x1000 82 83 #define HME_MACI_TXCFG 0x20c 84 #define HME_MAC_TXCFG_ENABLE 0x1 85 86 #define HME_MACI_RXCFG 0x30c 87 #define HME_MAC_RXCFG_ENABLE 0x1 88 #define HME_MAC_RXCFG_PMISC 0x40 89 #define HME_MAC_RXCFG_HENABLE 0x800 90 91 #define HME_MACI_MACADDR2 0x318 92 #define HME_MACI_MACADDR1 0x31c 93 #define HME_MACI_MACADDR0 0x320 94 95 #define HME_MACI_HASHTAB3 0x340 96 #define HME_MACI_HASHTAB2 0x344 97 #define HME_MACI_HASHTAB1 0x348 98 #define HME_MACI_HASHTAB0 
0x34c 99 100 #define HME_MIF_REG_SIZE 0x20 101 102 #define HME_MIFI_FO 0xc 103 #define HME_MIF_FO_ST 0xc0000000 104 #define HME_MIF_FO_ST_SHIFT 30 105 #define HME_MIF_FO_OPC 0x30000000 106 #define HME_MIF_FO_OPC_SHIFT 28 107 #define HME_MIF_FO_PHYAD 0x0f800000 108 #define HME_MIF_FO_PHYAD_SHIFT 23 109 #define HME_MIF_FO_REGAD 0x007c0000 110 #define HME_MIF_FO_REGAD_SHIFT 18 111 #define HME_MIF_FO_TAMSB 0x20000 112 #define HME_MIF_FO_TALSB 0x10000 113 #define HME_MIF_FO_DATA 0xffff 114 115 #define HME_MIFI_CFG 0x10 116 #define HME_MIF_CFG_MDI0 0x100 117 #define HME_MIF_CFG_MDI1 0x200 118 119 #define HME_MIFI_IMASK 0x14 120 121 #define HME_MIFI_STAT 0x18 122 123 124 /* Wired HME PHY addresses */ 125 #define HME_PHYAD_INTERNAL 1 126 #define HME_PHYAD_EXTERNAL 0 127 128 #define MII_COMMAND_START 0x1 129 #define MII_COMMAND_READ 0x2 130 #define MII_COMMAND_WRITE 0x1 131 132 #define TYPE_SUNHME "sunhme" 133 typedef struct SunHMEState SunHMEState; 134 DECLARE_INSTANCE_CHECKER(SunHMEState, SUNHME, 135 TYPE_SUNHME) 136 137 /* Maximum size of buffer */ 138 #define HME_FIFO_SIZE 0x800 139 140 /* Size of TX/RX descriptor */ 141 #define HME_DESC_SIZE 0x8 142 143 #define HME_XD_OWN 0x80000000 144 #define HME_XD_OFL 0x40000000 145 #define HME_XD_SOP 0x40000000 146 #define HME_XD_EOP 0x20000000 147 #define HME_XD_RXLENMSK 0x3fff0000 148 #define HME_XD_RXLENSHIFT 16 149 #define HME_XD_RXCKSUM 0xffff 150 #define HME_XD_TXLENMSK 0x00001fff 151 #define HME_XD_TXCKSUM 0x10000000 152 #define HME_XD_TXCSSTUFF 0xff00000 153 #define HME_XD_TXCSSTUFFSHIFT 20 154 #define HME_XD_TXCSSTART 0xfc000 155 #define HME_XD_TXCSSTARTSHIFT 14 156 157 #define HME_MII_REGS_SIZE 0x20 158 159 struct SunHMEState { 160 /*< private >*/ 161 PCIDevice parent_obj; 162 163 NICState *nic; 164 NICConf conf; 165 166 MemoryRegion hme; 167 MemoryRegion sebreg; 168 MemoryRegion etxreg; 169 MemoryRegion erxreg; 170 MemoryRegion macreg; 171 MemoryRegion mifreg; 172 173 uint32_t sebregs[HME_SEB_REG_SIZE >> 2]; 174 
uint32_t etxregs[HME_ETX_REG_SIZE >> 2]; 175 uint32_t erxregs[HME_ERX_REG_SIZE >> 2]; 176 uint32_t macregs[HME_MAC_REG_SIZE >> 2]; 177 uint32_t mifregs[HME_MIF_REG_SIZE >> 2]; 178 179 uint16_t miiregs[HME_MII_REGS_SIZE]; 180 }; 181 182 static Property sunhme_properties[] = { 183 DEFINE_NIC_PROPERTIES(SunHMEState, conf), 184 DEFINE_PROP_END_OF_LIST(), 185 }; 186 187 static void sunhme_reset_tx(SunHMEState *s) 188 { 189 /* Indicate TX reset complete */ 190 s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX; 191 } 192 193 static void sunhme_reset_rx(SunHMEState *s) 194 { 195 /* Indicate RX reset complete */ 196 s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX; 197 } 198 199 static void sunhme_update_irq(SunHMEState *s) 200 { 201 PCIDevice *d = PCI_DEVICE(s); 202 int level; 203 204 /* MIF interrupt mask (16-bit) */ 205 uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff; 206 uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask; 207 208 /* Main SEB interrupt mask (include MIF status from above) */ 209 uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) & 210 ~HME_SEB_STAT_MIFIRQ; 211 uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask; 212 if (mif) { 213 seb |= HME_SEB_STAT_MIFIRQ; 214 } 215 216 level = (seb ? 
1 : 0); 217 trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level); 218 219 pci_set_irq(d, level); 220 } 221 222 static void sunhme_seb_write(void *opaque, hwaddr addr, 223 uint64_t val, unsigned size) 224 { 225 SunHMEState *s = SUNHME(opaque); 226 227 trace_sunhme_seb_write(addr, val); 228 229 /* Handly buggy Linux drivers before 4.13 which have 230 the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */ 231 switch (addr) { 232 case HME_SEBI_STAT_LINUXBUG: 233 addr = HME_SEBI_STAT; 234 break; 235 case HME_SEBI_IMASK_LINUXBUG: 236 addr = HME_SEBI_IMASK; 237 break; 238 default: 239 break; 240 } 241 242 switch (addr) { 243 case HME_SEBI_RESET: 244 if (val & HME_SEB_RESET_ETX) { 245 sunhme_reset_tx(s); 246 } 247 if (val & HME_SEB_RESET_ERX) { 248 sunhme_reset_rx(s); 249 } 250 val = s->sebregs[HME_SEBI_RESET >> 2]; 251 break; 252 } 253 254 s->sebregs[addr >> 2] = val; 255 } 256 257 static uint64_t sunhme_seb_read(void *opaque, hwaddr addr, 258 unsigned size) 259 { 260 SunHMEState *s = SUNHME(opaque); 261 uint64_t val; 262 263 /* Handly buggy Linux drivers before 4.13 which have 264 the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */ 265 switch (addr) { 266 case HME_SEBI_STAT_LINUXBUG: 267 addr = HME_SEBI_STAT; 268 break; 269 case HME_SEBI_IMASK_LINUXBUG: 270 addr = HME_SEBI_IMASK; 271 break; 272 default: 273 break; 274 } 275 276 val = s->sebregs[addr >> 2]; 277 278 switch (addr) { 279 case HME_SEBI_STAT: 280 /* Autoclear status (except MIF) */ 281 s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ; 282 sunhme_update_irq(s); 283 break; 284 } 285 286 trace_sunhme_seb_read(addr, val); 287 288 return val; 289 } 290 291 static const MemoryRegionOps sunhme_seb_ops = { 292 .read = sunhme_seb_read, 293 .write = sunhme_seb_write, 294 .endianness = DEVICE_LITTLE_ENDIAN, 295 .valid = { 296 .min_access_size = 4, 297 .max_access_size = 4, 298 }, 299 }; 300 301 static void sunhme_transmit(SunHMEState *s); 302 303 static void sunhme_etx_write(void *opaque, hwaddr addr, 
304 uint64_t val, unsigned size) 305 { 306 SunHMEState *s = SUNHME(opaque); 307 308 trace_sunhme_etx_write(addr, val); 309 310 switch (addr) { 311 case HME_ETXI_PENDING: 312 if (val) { 313 sunhme_transmit(s); 314 } 315 break; 316 } 317 318 s->etxregs[addr >> 2] = val; 319 } 320 321 static uint64_t sunhme_etx_read(void *opaque, hwaddr addr, 322 unsigned size) 323 { 324 SunHMEState *s = SUNHME(opaque); 325 uint64_t val; 326 327 val = s->etxregs[addr >> 2]; 328 329 trace_sunhme_etx_read(addr, val); 330 331 return val; 332 } 333 334 static const MemoryRegionOps sunhme_etx_ops = { 335 .read = sunhme_etx_read, 336 .write = sunhme_etx_write, 337 .endianness = DEVICE_LITTLE_ENDIAN, 338 .valid = { 339 .min_access_size = 4, 340 .max_access_size = 4, 341 }, 342 }; 343 344 static void sunhme_erx_write(void *opaque, hwaddr addr, 345 uint64_t val, unsigned size) 346 { 347 SunHMEState *s = SUNHME(opaque); 348 349 trace_sunhme_erx_write(addr, val); 350 351 s->erxregs[addr >> 2] = val; 352 } 353 354 static uint64_t sunhme_erx_read(void *opaque, hwaddr addr, 355 unsigned size) 356 { 357 SunHMEState *s = SUNHME(opaque); 358 uint64_t val; 359 360 val = s->erxregs[addr >> 2]; 361 362 trace_sunhme_erx_read(addr, val); 363 364 return val; 365 } 366 367 static const MemoryRegionOps sunhme_erx_ops = { 368 .read = sunhme_erx_read, 369 .write = sunhme_erx_write, 370 .endianness = DEVICE_LITTLE_ENDIAN, 371 .valid = { 372 .min_access_size = 4, 373 .max_access_size = 4, 374 }, 375 }; 376 377 static void sunhme_mac_write(void *opaque, hwaddr addr, 378 uint64_t val, unsigned size) 379 { 380 SunHMEState *s = SUNHME(opaque); 381 uint64_t oldval = s->macregs[addr >> 2]; 382 383 trace_sunhme_mac_write(addr, val); 384 385 s->macregs[addr >> 2] = val; 386 387 switch (addr) { 388 case HME_MACI_RXCFG: 389 if (!(oldval & HME_MAC_RXCFG_ENABLE) && 390 (val & HME_MAC_RXCFG_ENABLE)) { 391 qemu_flush_queued_packets(qemu_get_queue(s->nic)); 392 } 393 break; 394 } 395 } 396 397 static uint64_t 
sunhme_mac_read(void *opaque, hwaddr addr, 398 unsigned size) 399 { 400 SunHMEState *s = SUNHME(opaque); 401 uint64_t val; 402 403 val = s->macregs[addr >> 2]; 404 405 trace_sunhme_mac_read(addr, val); 406 407 return val; 408 } 409 410 static const MemoryRegionOps sunhme_mac_ops = { 411 .read = sunhme_mac_read, 412 .write = sunhme_mac_write, 413 .endianness = DEVICE_LITTLE_ENDIAN, 414 .valid = { 415 .min_access_size = 4, 416 .max_access_size = 4, 417 }, 418 }; 419 420 static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data) 421 { 422 trace_sunhme_mii_write(reg, data); 423 424 switch (reg) { 425 case MII_BMCR: 426 if (data & MII_BMCR_RESET) { 427 /* Autoclear reset bit, enable auto negotiation */ 428 data &= ~MII_BMCR_RESET; 429 data |= MII_BMCR_AUTOEN; 430 } 431 if (data & MII_BMCR_ANRESTART) { 432 /* Autoclear auto negotiation restart */ 433 data &= ~MII_BMCR_ANRESTART; 434 435 /* Indicate negotiation complete */ 436 s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP; 437 438 if (!qemu_get_queue(s->nic)->link_down) { 439 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; 440 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; 441 } 442 } 443 break; 444 } 445 446 s->miiregs[reg] = data; 447 } 448 449 static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg) 450 { 451 uint16_t data = s->miiregs[reg]; 452 453 trace_sunhme_mii_read(reg, data); 454 455 return data; 456 } 457 458 static void sunhme_mif_write(void *opaque, hwaddr addr, 459 uint64_t val, unsigned size) 460 { 461 SunHMEState *s = SUNHME(opaque); 462 uint8_t cmd, reg; 463 uint16_t data; 464 465 trace_sunhme_mif_write(addr, val); 466 467 switch (addr) { 468 case HME_MIFI_CFG: 469 /* Mask the read-only bits */ 470 val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1); 471 val |= s->mifregs[HME_MIFI_CFG >> 2] & 472 (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1); 473 break; 474 case HME_MIFI_FO: 475 /* Detect start of MII command */ 476 if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT 477 != MII_COMMAND_START) { 478 val |= 
HME_MIF_FO_TALSB; 479 break; 480 } 481 482 /* Internal phy only */ 483 if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT 484 != HME_PHYAD_INTERNAL) { 485 val |= HME_MIF_FO_TALSB; 486 break; 487 } 488 489 cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT; 490 reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT; 491 data = (val & HME_MIF_FO_DATA); 492 493 switch (cmd) { 494 case MII_COMMAND_WRITE: 495 sunhme_mii_write(s, reg, data); 496 break; 497 498 case MII_COMMAND_READ: 499 val &= ~HME_MIF_FO_DATA; 500 val |= sunhme_mii_read(s, reg); 501 break; 502 } 503 504 val |= HME_MIF_FO_TALSB; 505 break; 506 } 507 508 s->mifregs[addr >> 2] = val; 509 } 510 511 static uint64_t sunhme_mif_read(void *opaque, hwaddr addr, 512 unsigned size) 513 { 514 SunHMEState *s = SUNHME(opaque); 515 uint64_t val; 516 517 val = s->mifregs[addr >> 2]; 518 519 switch (addr) { 520 case HME_MIFI_STAT: 521 /* Autoclear MIF interrupt status */ 522 s->mifregs[HME_MIFI_STAT >> 2] = 0; 523 sunhme_update_irq(s); 524 break; 525 } 526 527 trace_sunhme_mif_read(addr, val); 528 529 return val; 530 } 531 532 static const MemoryRegionOps sunhme_mif_ops = { 533 .read = sunhme_mif_read, 534 .write = sunhme_mif_write, 535 .endianness = DEVICE_LITTLE_ENDIAN, 536 .valid = { 537 .min_access_size = 4, 538 .max_access_size = 4, 539 }, 540 }; 541 542 static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size) 543 { 544 qemu_send_packet(qemu_get_queue(s->nic), buf, size); 545 } 546 547 static inline int sunhme_get_tx_ring_count(SunHMEState *s) 548 { 549 return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4; 550 } 551 552 static inline int sunhme_get_tx_ring_nr(SunHMEState *s) 553 { 554 return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET; 555 } 556 557 static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i) 558 { 559 uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET; 560 ring |= i & HME_ETXI_RING_OFFSET; 561 562 s->etxregs[HME_ETXI_RING >> 2] = ring; 563 } 
564 565 static void sunhme_transmit(SunHMEState *s) 566 { 567 PCIDevice *d = PCI_DEVICE(s); 568 dma_addr_t tb, addr; 569 uint32_t intstatus, status, buffer, sum = 0; 570 int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0; 571 uint16_t csum = 0; 572 uint8_t xmit_buffer[HME_FIFO_SIZE]; 573 574 tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR; 575 nr = sunhme_get_tx_ring_count(s); 576 cr = sunhme_get_tx_ring_nr(s); 577 578 pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4); 579 pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4); 580 581 xmit_pos = 0; 582 while (status & HME_XD_OWN) { 583 trace_sunhme_tx_desc(buffer, status, cr, nr); 584 585 /* Copy data into transmit buffer */ 586 addr = buffer; 587 len = status & HME_XD_TXLENMSK; 588 589 if (xmit_pos + len > HME_FIFO_SIZE) { 590 len = HME_FIFO_SIZE - xmit_pos; 591 } 592 593 pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len); 594 xmit_pos += len; 595 596 /* Detect start of packet for TX checksum */ 597 if (status & HME_XD_SOP) { 598 sum = 0; 599 csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT; 600 csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >> 601 HME_XD_TXCSSTUFFSHIFT; 602 } 603 604 if (status & HME_XD_TXCKSUM) { 605 /* Only start calculation from csum_offset */ 606 if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) { 607 sum += net_checksum_add(xmit_pos - csum_offset, 608 xmit_buffer + csum_offset); 609 trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset); 610 } else { 611 sum += net_checksum_add(len, xmit_buffer + xmit_pos - len); 612 trace_sunhme_tx_xsum_add(xmit_pos - len, len); 613 } 614 } 615 616 /* Detect end of packet for TX checksum */ 617 if (status & HME_XD_EOP) { 618 /* Stuff the checksum if required */ 619 if (status & HME_XD_TXCKSUM) { 620 csum = net_checksum_finish(sum); 621 stw_be_p(xmit_buffer + csum_stuff_offset, csum); 622 trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset); 623 } 624 625 if (s->macregs[HME_MACI_TXCFG >> 2] & 
HME_MAC_TXCFG_ENABLE) { 626 sunhme_transmit_frame(s, xmit_buffer, xmit_pos); 627 trace_sunhme_tx_done(xmit_pos); 628 } 629 } 630 631 /* Update status */ 632 status &= ~HME_XD_OWN; 633 pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4); 634 635 /* Move onto next descriptor */ 636 cr++; 637 if (cr >= nr) { 638 cr = 0; 639 } 640 sunhme_set_tx_ring_nr(s, cr); 641 642 pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4); 643 pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4); 644 645 /* Indicate TX complete */ 646 intstatus = s->sebregs[HME_SEBI_STAT >> 2]; 647 intstatus |= HME_SEB_STAT_HOSTTOTX; 648 s->sebregs[HME_SEBI_STAT >> 2] = intstatus; 649 650 /* Autoclear TX pending */ 651 s->etxregs[HME_ETXI_PENDING >> 2] = 0; 652 653 sunhme_update_irq(s); 654 } 655 656 /* TX FIFO now clear */ 657 intstatus = s->sebregs[HME_SEBI_STAT >> 2]; 658 intstatus |= HME_SEB_STAT_TXALL; 659 s->sebregs[HME_SEBI_STAT >> 2] = intstatus; 660 sunhme_update_irq(s); 661 } 662 663 static bool sunhme_can_receive(NetClientState *nc) 664 { 665 SunHMEState *s = qemu_get_nic_opaque(nc); 666 667 return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE); 668 } 669 670 static void sunhme_link_status_changed(NetClientState *nc) 671 { 672 SunHMEState *s = qemu_get_nic_opaque(nc); 673 674 if (nc->link_down) { 675 s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD; 676 s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST; 677 } else { 678 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; 679 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; 680 } 681 682 /* Exact bits unknown */ 683 s->mifregs[HME_MIFI_STAT >> 2] = 0xffff; 684 sunhme_update_irq(s); 685 } 686 687 static inline int sunhme_get_rx_ring_count(SunHMEState *s) 688 { 689 uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE) 690 >> HME_ERX_CFG_RINGSIZE_SHIFT; 691 692 switch (rings) { 693 case 0: 694 return 32; 695 case 1: 696 return 64; 697 case 2: 698 return 128; 699 case 3: 700 return 256; 701 } 702 703 return 0; 704 } 705 706 static inline int 
sunhme_get_rx_ring_nr(SunHMEState *s) 707 { 708 return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET; 709 } 710 711 static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i) 712 { 713 uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET; 714 ring |= i & HME_ERXI_RING_OFFSET; 715 716 s->erxregs[HME_ERXI_RING >> 2] = ring; 717 } 718 719 #define MIN_BUF_SIZE 60 720 721 static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf, 722 size_t size) 723 { 724 SunHMEState *s = qemu_get_nic_opaque(nc); 725 PCIDevice *d = PCI_DEVICE(s); 726 dma_addr_t rb, addr; 727 uint32_t intstatus, status, buffer, buffersize, sum; 728 uint16_t csum; 729 uint8_t buf1[60]; 730 int nr, cr, len, rxoffset, csum_offset; 731 732 trace_sunhme_rx_incoming(size); 733 734 /* Do nothing if MAC RX disabled */ 735 if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) { 736 return 0; 737 } 738 739 trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2], 740 buf[3], buf[4], buf[5]); 741 742 /* Check destination MAC address */ 743 if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) { 744 /* Try and match local MAC address */ 745 if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] && 746 (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] && 747 ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] && 748 (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] && 749 ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] && 750 (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) { 751 /* Matched local MAC address */ 752 trace_sunhme_rx_filter_local_match(); 753 } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff && 754 buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) { 755 /* Matched broadcast address */ 756 trace_sunhme_rx_filter_bcast_match(); 757 } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) { 758 /* Didn't match local address, check hash filter */ 759 int mcast_idx = 
net_crc32_le(buf, ETH_ALEN) >> 26; 760 if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] & 761 (1 << (mcast_idx & 0xf)))) { 762 /* Didn't match hash filter */ 763 trace_sunhme_rx_filter_hash_nomatch(); 764 trace_sunhme_rx_filter_reject(); 765 return -1; 766 } else { 767 trace_sunhme_rx_filter_hash_match(); 768 } 769 } else { 770 /* Not for us */ 771 trace_sunhme_rx_filter_reject(); 772 return -1; 773 } 774 } else { 775 trace_sunhme_rx_filter_promisc_match(); 776 } 777 778 trace_sunhme_rx_filter_accept(); 779 780 /* If too small buffer, then expand it */ 781 if (size < MIN_BUF_SIZE) { 782 memcpy(buf1, buf, size); 783 memset(buf1 + size, 0, MIN_BUF_SIZE - size); 784 buf = buf1; 785 size = MIN_BUF_SIZE; 786 } 787 788 rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR; 789 nr = sunhme_get_rx_ring_count(s); 790 cr = sunhme_get_rx_ring_nr(s); 791 792 pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4); 793 pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4); 794 795 /* If we don't own the current descriptor then indicate overflow error */ 796 if (!(status & HME_XD_OWN)) { 797 s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD; 798 sunhme_update_irq(s); 799 trace_sunhme_rx_norxd(); 800 return -1; 801 } 802 803 rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >> 804 HME_ERX_CFG_BYTEOFFSET_SHIFT; 805 806 addr = buffer + rxoffset; 807 buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT; 808 809 /* Detect receive overflow */ 810 len = size; 811 if (size > buffersize) { 812 status |= HME_XD_OFL; 813 len = buffersize; 814 } 815 816 pci_dma_write(d, addr, buf, len); 817 818 trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr); 819 820 /* Calculate the receive checksum */ 821 csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >> 822 HME_ERX_CFG_CSUMSHIFT << 1; 823 sum = 0; 824 sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset); 825 csum = net_checksum_finish(sum); 826 827 
trace_sunhme_rx_xsum_calc(csum); 828 829 /* Update status */ 830 status &= ~HME_XD_OWN; 831 status &= ~HME_XD_RXLENMSK; 832 status |= len << HME_XD_RXLENSHIFT; 833 status &= ~HME_XD_RXCKSUM; 834 status |= csum; 835 836 pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4); 837 838 cr++; 839 if (cr >= nr) { 840 cr = 0; 841 } 842 843 sunhme_set_rx_ring_nr(s, cr); 844 845 /* Indicate RX complete */ 846 intstatus = s->sebregs[HME_SEBI_STAT >> 2]; 847 intstatus |= HME_SEB_STAT_RXTOHOST; 848 s->sebregs[HME_SEBI_STAT >> 2] = intstatus; 849 850 sunhme_update_irq(s); 851 852 return len; 853 } 854 855 static NetClientInfo net_sunhme_info = { 856 .type = NET_CLIENT_DRIVER_NIC, 857 .size = sizeof(NICState), 858 .can_receive = sunhme_can_receive, 859 .receive = sunhme_receive, 860 .link_status_changed = sunhme_link_status_changed, 861 }; 862 863 static void sunhme_realize(PCIDevice *pci_dev, Error **errp) 864 { 865 SunHMEState *s = SUNHME(pci_dev); 866 DeviceState *d = DEVICE(pci_dev); 867 uint8_t *pci_conf; 868 869 pci_conf = pci_dev->config; 870 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ 871 872 memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE); 873 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme); 874 875 memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s, 876 "sunhme.seb", HME_SEB_REG_SIZE); 877 memory_region_add_subregion(&s->hme, 0, &s->sebreg); 878 879 memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s, 880 "sunhme.etx", HME_ETX_REG_SIZE); 881 memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg); 882 883 memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s, 884 "sunhme.erx", HME_ERX_REG_SIZE); 885 memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg); 886 887 memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s, 888 "sunhme.mac", HME_MAC_REG_SIZE); 889 memory_region_add_subregion(&s->hme, 0x6000, &s->macreg); 890 891 
memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s, 892 "sunhme.mif", HME_MIF_REG_SIZE); 893 memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg); 894 895 qemu_macaddr_default_if_unset(&s->conf.macaddr); 896 s->nic = qemu_new_nic(&net_sunhme_info, &s->conf, 897 object_get_typename(OBJECT(d)), d->id, s); 898 qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); 899 } 900 901 static void sunhme_instance_init(Object *obj) 902 { 903 SunHMEState *s = SUNHME(obj); 904 905 device_add_bootindex_property(obj, &s->conf.bootindex, 906 "bootindex", "/ethernet-phy@0", 907 DEVICE(obj)); 908 } 909 910 static void sunhme_reset(DeviceState *ds) 911 { 912 SunHMEState *s = SUNHME(ds); 913 914 /* Configure internal transceiver */ 915 s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0; 916 917 /* Advetise auto, 100Mbps FD */ 918 s->miiregs[MII_ANAR] = MII_ANAR_TXFD; 919 s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD | 920 MII_BMSR_AN_COMP; 921 922 if (!qemu_get_queue(s->nic)->link_down) { 923 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; 924 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; 925 } 926 927 /* Set manufacturer */ 928 s->miiregs[MII_PHYID1] = DP83840_PHYID1; 929 s->miiregs[MII_PHYID2] = DP83840_PHYID2; 930 931 /* Configure default interrupt mask */ 932 s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff; 933 s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff; 934 } 935 936 static const VMStateDescription vmstate_hme = { 937 .name = "sunhme", 938 .version_id = 0, 939 .minimum_version_id = 0, 940 .fields = (VMStateField[]) { 941 VMSTATE_PCI_DEVICE(parent_obj, SunHMEState), 942 VMSTATE_MACADDR(conf.macaddr, SunHMEState), 943 VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)), 944 VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)), 945 VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)), 946 VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)), 947 VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, 
(HME_MIF_REG_SIZE >> 2)), 948 VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE), 949 VMSTATE_END_OF_LIST() 950 } 951 }; 952 953 static void sunhme_class_init(ObjectClass *klass, void *data) 954 { 955 DeviceClass *dc = DEVICE_CLASS(klass); 956 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); 957 958 k->realize = sunhme_realize; 959 k->vendor_id = PCI_VENDOR_ID_SUN; 960 k->device_id = PCI_DEVICE_ID_SUN_HME; 961 k->class_id = PCI_CLASS_NETWORK_ETHERNET; 962 dc->vmsd = &vmstate_hme; 963 dc->reset = sunhme_reset; 964 device_class_set_props(dc, sunhme_properties); 965 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); 966 } 967 968 static const TypeInfo sunhme_info = { 969 .name = TYPE_SUNHME, 970 .parent = TYPE_PCI_DEVICE, 971 .class_init = sunhme_class_init, 972 .instance_size = sizeof(SunHMEState), 973 .instance_init = sunhme_instance_init, 974 .interfaces = (InterfaceInfo[]) { 975 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 976 { } 977 } 978 }; 979 980 static void sunhme_register_types(void) 981 { 982 type_register_static(&sunhme_info); 983 } 984 985 type_init(sunhme_register_types) 986