/*
 * QEMU model of SUN GEM ethernet controller
 *
 * As found in Apple ASICs among others
 *
 * Copyright 2016 Ben Herrenschmidt
 * Copyright 2017 Mark Cave-Ayland
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "hw/net/mii.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"

#define TYPE_SUNGEM "sungem"

typedef struct SunGEMState SunGEMState;
DECLARE_INSTANCE_CHECKER(SunGEMState, SUNGEM,
                         TYPE_SUNGEM)

#define MAX_PACKET_SIZE 9016

#define SUNGEM_MMIO_SIZE 0x200000

/* Global registers */
#define SUNGEM_MMIO_GREG_SIZE 0x2000

#define GREG_SEBSTATE 0x0000UL /* SEB State Register */

#define GREG_STAT 0x000CUL /* Status Register */
#define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
#define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
#define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
#define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */
#define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */
#define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */
#define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
#define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */
#define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */
#define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */
#define GREG_STAT_TXNR_SHIFT 19

/* These interrupts are edge latches in the status register,
 * reading it (or writing the corresponding bit in IACK) will
 * clear them
 */
#define GREG_STAT_LATCH (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
                         GREG_STAT_RXDONE | GREG_STAT_RXDONE | \
                         GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR)

#define GREG_IMASK 0x0010UL /* Interrupt Mask Register */
#define GREG_IACK 0x0014UL /* Interrupt ACK Register */
#define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */
#define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */
#define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */

#define GREG_SWRST 0x1010UL /* Software Reset Register */
#define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
#define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */
#define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */

/* TX DMA Registers */
#define SUNGEM_MMIO_TXDMA_SIZE 0x1000

#define TXDMA_KICK 0x0000UL /* TX Kick Register */

#define TXDMA_CFG 0x0004UL /* TX Configuration Register */
#define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
#define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */

#define TXDMA_DBLOW 0x0008UL /* TX Desc. Base Low */
#define TXDMA_DBHI 0x000CUL /* TX Desc. Base High */
#define TXDMA_PCNT 0x0024UL /* TX FIFO Packet Counter */
#define TXDMA_SMACHINE 0x0028UL /* TX State Machine Register */
#define TXDMA_DPLOW 0x0030UL /* TX Data Pointer Low */
#define TXDMA_DPHI 0x0034UL /* TX Data Pointer High */
#define TXDMA_TXDONE 0x0100UL /* TX Completion Register */
#define TXDMA_FTAG 0x0108UL /* TX FIFO Tag */
#define TXDMA_FSZ 0x0118UL /* TX FIFO Size */

/* Receive DMA Registers */
#define SUNGEM_MMIO_RXDMA_SIZE 0x2000

#define RXDMA_CFG 0x0000UL /* RX Configuration Register */
#define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */
#define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */
#define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */
#define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */

#define RXDMA_DBLOW 0x0004UL /* RX Descriptor Base Low */
#define RXDMA_DBHI 0x0008UL /* RX Descriptor Base High */
#define RXDMA_PCNT 0x0018UL /* RX FIFO Packet Counter */
#define RXDMA_SMACHINE 0x001CUL /* RX State Machine Register */
#define RXDMA_PTHRESH 0x0020UL /* Pause Thresholds */
#define RXDMA_DPLOW 0x0024UL /* RX Data Pointer Low */
#define RXDMA_DPHI 0x0028UL /* RX Data Pointer High */
#define RXDMA_KICK 0x0100UL /* RX Kick Register */
#define RXDMA_DONE 0x0104UL /* RX Completion Register */
#define RXDMA_BLANK 0x0108UL /* RX Blanking Register */
#define RXDMA_FTAG 0x0110UL /* RX FIFO Tag */
#define RXDMA_FSZ 0x0120UL /* RX FIFO Size */

/* MAC Registers */
#define SUNGEM_MMIO_MAC_SIZE 0x200

#define MAC_TXRST 0x0000UL /* TX MAC Software Reset Command */
#define MAC_RXRST 0x0004UL /* RX MAC Software Reset Command */
#define MAC_TXSTAT 0x0010UL /* TX MAC Status Register */
#define MAC_RXSTAT 0x0014UL /* RX MAC Status Register */

#define MAC_CSTAT 0x0018UL /* MAC Control Status Register */
#define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */

#define MAC_TXMASK 0x0020UL /* TX MAC Mask Register */
#define MAC_RXMASK 0x0024UL /* RX MAC Mask Register */
#define MAC_MCMASK 0x0028UL /* MAC Control Mask Register */

#define MAC_TXCFG 0x0030UL /* TX MAC Configuration Register */
#define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */

#define MAC_RXCFG 0x0034UL /* RX MAC Configuration Register */
#define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */
#define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */
#define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */
#define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */
#define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */

#define MAC_XIFCFG 0x003CUL /* XIF Configuration Register */
#define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */

#define MAC_MINFSZ 0x0050UL /* MinFrameSize Register */
#define MAC_MAXFSZ 0x0054UL /* MaxFrameSize Register */
#define MAC_ADDR0 0x0080UL /* MAC Address 0 Register */
#define MAC_ADDR1 0x0084UL /* MAC Address 1 Register */
#define MAC_ADDR2 0x0088UL /* MAC Address 2 Register */
#define MAC_ADDR3 0x008CUL /* MAC Address 3 Register */
#define MAC_ADDR4 0x0090UL /* MAC Address 4 Register */
#define MAC_ADDR5 0x0094UL /* MAC Address 5 Register */
#define MAC_HASH0 0x00C0UL /* Hash Table 0 Register */
#define MAC_PATMPS 0x0114UL /* Peak Attempts Register */
#define MAC_SMACHINE 0x0134UL /* State Machine Register */

/* MIF Registers */
#define SUNGEM_MMIO_MIF_SIZE 0x20

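/* The MIF frame/output register follows the IEEE 802.3 clause 22 MDIO
 * frame layout: bits 31:30 start of frame, 29:28 opcode (01 write,
 * 10 read), 27:23 PHY address, 22:18 register address, 17:16 turnaround,
 * 15:0 data. The field masks below reflect that layout.
 */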
#define MIF_FRAME 0x000CUL /* MIF Frame/Output Register */
#define MIF_FRAME_OP 0x30000000 /* OPcode */
#define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
#define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
#define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
#define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */

#define MIF_CFG 0x0010UL /* MIF Configuration Register */
#define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
#define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */

#define MIF_STATUS 0x0018UL /* MIF Status Register */
#define MIF_SMACHINE 0x001CUL /* MIF State Machine Register */

/* PCS/Serialink Registers */
#define SUNGEM_MMIO_PCS_SIZE 0x60
#define PCS_MIISTAT 0x0004UL /* PCS MII Status Register */
#define PCS_ISTAT 0x0018UL /* PCS Interrupt Status Reg */
#define PCS_SSTATE 0x005CUL /* Serialink State Register */

/* Descriptors */
struct gem_txd {
    uint64_t control_word;
    uint64_t buffer;
};

#define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */
#define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */
#define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */
#define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */
#define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */
#define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */
#define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */

struct gem_rxd {
    uint64_t status_word;
    uint64_t buffer;
};

#define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */
#define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */


struct SunGEMState {
    PCIDevice pdev;

    MemoryRegion sungem;
    MemoryRegion greg;
    MemoryRegion txdma;
    MemoryRegion rxdma;
    MemoryRegion mac;
    MemoryRegion mif;
    MemoryRegion pcs;
    NICState *nic;
    NICConf conf;
    uint32_t phy_addr;

    uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2];
    uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2];
    uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2];
    uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2];
    uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2];
    uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 2];

    /* Cache some useful things */
    uint32_t rx_mask;
    uint32_t tx_mask;

    /* Current tx packet */
    uint8_t tx_data[MAX_PACKET_SIZE];
    uint32_t tx_size;
    uint64_t tx_first_ctl;
};


static void sungem_eval_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->gregs[GREG_IMASK >> 2];
    stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
    if (stat & ~mask) {
        pci_set_irq(PCI_DEVICE(s), 1);
    } else {
        pci_set_irq(PCI_DEVICE(s), 0);
    }
}

static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
{
    uint32_t stat;

    stat = s->gregs[GREG_STAT >> 2];
    if (val) {
        stat |= bits;
    } else {
        stat &= ~bits;
    }
    s->gregs[GREG_STAT >> 2] = stat;
    sungem_eval_irq(s);
}

static void sungem_eval_cascade_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->macregs[MAC_TXSTAT >> 2];
    stat = s->macregs[MAC_TXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_TXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_TXMAC, false);
    }

    mask = s->macregs[MAC_RXSTAT >> 2];
    stat = s->macregs[MAC_RXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_RXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_RXMAC, false);
    }

    mask = s->macregs[MAC_CSTAT >> 2];
    stat = s->macregs[MAC_MCMASK >> 2] & ~MAC_CSTAT_PTR;
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_MAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_MAC, false);
    }
}

static void sungem_do_tx_csum(SunGEMState *s)
{
    uint16_t start, off;
    uint32_t csum;

    start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15;
    off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21;

    trace_sungem_tx_checksum(start, off);

    if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) {
        trace_sungem_tx_checksum_oob();
        return;
    }

    csum = net_raw_checksum(s->tx_data + start, s->tx_size - start);
    stw_be_p(s->tx_data + off, csum);
}

static void sungem_send_packet(SunGEMState *s, const uint8_t *buf,
                               int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);

    if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t len;

    /* If it's a start of frame, discard anything we had in the
     * buffer and start again. This should be an error condition
     * if we had something ... for now we ignore it
     */
    if (desc->control_word & TXDCTRL_SOF) {
        if (s->tx_first_ctl) {
            trace_sungem_tx_unfinished();
        }
        s->tx_size = 0;
        s->tx_first_ctl = desc->control_word;
    }

    /* Grab data size */
    len = desc->control_word & TXDCTRL_BUFSZ;

    /* Clamp it to our max size */
    if ((s->tx_size + len) > MAX_PACKET_SIZE) {
        trace_sungem_tx_overflow();
        len = MAX_PACKET_SIZE - s->tx_size;
    }

    /* Read the data */
    pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len);
    s->tx_size += len;

    /* If end of frame, send packet */
    if (desc->control_word & TXDCTRL_EOF) {
        trace_sungem_tx_finished(s->tx_size);

        /* Handle csum */
        if (s->tx_first_ctl & TXDCTRL_CENAB) {
            sungem_do_tx_csum(s);
        }

        /* Send it */
        sungem_send_packet(s, s->tx_data, s->tx_size);

        /* No more pending packet */
        s->tx_size = 0;
        s->tx_first_ctl = 0;
    }
}

static void sungem_tx_kick(SunGEMState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t comp, kick;
    uint32_t txdma_cfg, txmac_cfg, ints;
    uint64_t dbase;

    trace_sungem_tx_kick();

    /* Check that both TX MAC and TX DMA are enabled. We don't
     * handle DMA-less direct FIFO operations (we don't emulate
     * the FIFO at all).
     *
     * A write to TXDMA_KICK while DMA isn't enabled can happen
     * when the driver is resetting the pointer.
     */
    txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2];
    txmac_cfg = s->macregs[MAC_TXCFG >> 2];
    if (!(txdma_cfg & TXDMA_CFG_ENABLE) ||
        !(txmac_cfg & MAC_TXCFG_ENAB)) {
        trace_sungem_tx_disabled();
        return;
    }

    /* XXX Test min frame size register ? */
    /* XXX Test max frame size register ? */

    dbase = s->txdmaregs[TXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2];

    comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask;
    kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask;

    trace_sungem_tx_process(comp, kick, s->tx_mask + 1);

    /* This is rather primitive for now, we just send everything we
     * can in one go, like e1000. Ideally we should do the sending
     * from some kind of background task
     */
    while (comp != kick) {
        struct gem_txd desc;

        /* Read the next descriptor */
        pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc));

        /* Byteswap descriptor */
        desc.control_word = le64_to_cpu(desc.control_word);
        desc.buffer = le64_to_cpu(desc.buffer);
        trace_sungem_tx_desc(comp, desc.control_word, desc.buffer);

        /* Send it for processing */
        sungem_process_tx_desc(s, &desc);

        /* Interrupt */
        ints = GREG_STAT_TXDONE;
        if (desc.control_word & TXDCTRL_INTME) {
            ints |= GREG_STAT_TXINTME;
        }
        sungem_update_status(s, ints, true);

        /* Next ! */
        comp = (comp + 1) & s->tx_mask;
        s->txdmaregs[TXDMA_TXDONE >> 2] = comp;
    }

    /* We sent everything, set status/irq bit */
    sungem_update_status(s, GREG_STAT_TXALL, true);
}

static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done)
{
    return kick == ((done + 1) & s->rx_mask);
}

static bool sungem_can_receive(NetClientState *nc)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    uint32_t kick, done, rxdma_cfg, rxmac_cfg;
    bool full;

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];

    /* If MAC disabled, can't receive */
    if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) {
        trace_sungem_rx_mac_disabled();
        return false;
    }
    if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) {
        trace_sungem_rx_txdma_disabled();
        return false;
    }

    /* Check RX availability */
    kick = s->rxdmaregs[RXDMA_KICK >> 2];
    done = s->rxdmaregs[RXDMA_DONE >> 2];
    full = sungem_rx_full(s, kick, done);

    trace_sungem_rx_check(!full, kick, done);

    return !full;
}

enum {
    rx_no_match,
    rx_match_promisc,
    rx_match_bcast,
    rx_match_allmcast,
    rx_match_mcast,
    rx_match_mac,
    rx_match_altmac,
};

static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc)
{
    uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2];
    uint32_t mac0, mac1, mac2;

    /* Promisc enabled ? */
    if (rxcfg & MAC_RXCFG_PROM) {
        return rx_match_promisc;
    }

    /* Format MAC address into dwords */
    mac0 = (mac[4] << 8) | mac[5];
    mac1 = (mac[2] << 8) | mac[3];
    mac2 = (mac[0] << 8) | mac[1];

    trace_sungem_rx_mac_check(mac0, mac1, mac2);

    /* Is this a broadcast frame ? */
    if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) {
        return rx_match_bcast;
    }

    /* TODO: Implement address filter registers (or we don't care ?) */

    /* Is this a multicast frame ? */
    if (mac[0] & 1) {
        trace_sungem_rx_mac_multicast();

        /* Promisc group enabled ? */
        if (rxcfg & MAC_RXCFG_PGRP) {
            return rx_match_allmcast;
        }

        /* TODO: Check MAC control frames (or we don't care) ? */

        /* Check hash filter (somebody check that's correct ?) */
        if (rxcfg & MAC_RXCFG_HFE) {
            uint32_t hash, idx;

            crc >>= 24;
            idx = (crc >> 2) & 0x3c;
            hash = s->macregs[(MAC_HASH0 + idx) >> 2];
            if (hash & (1 << (15 - (crc & 0xf)))) {
                return rx_match_mcast;
            }
        }
        return rx_no_match;
    }

    /* Main MAC check */
    trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2],
                                s->macregs[MAC_ADDR1 >> 2],
                                s->macregs[MAC_ADDR2 >> 2]);

    if (mac0 == s->macregs[MAC_ADDR0 >> 2] &&
        mac1 == s->macregs[MAC_ADDR1 >> 2] &&
        mac2 == s->macregs[MAC_ADDR2 >> 2]) {
        return rx_match_mac;
    }

    /* Alt MAC check */
    if (mac0 == s->macregs[MAC_ADDR3 >> 2] &&
        mac1 == s->macregs[MAC_ADDR4 >> 2] &&
        mac2 == s->macregs[MAC_ADDR5 >> 2]) {
        return rx_match_altmac;
    }

    return rx_no_match;
}

static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t mac_crc, done, kick, max_fsize;
    uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff;
    uint8_t smallbuf[60];
    struct gem_rxd desc;
    uint64_t dbase, baddr;
    unsigned int rx_cond;

    trace_sungem_rx_packet(size);

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
    max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff;

    /* If MAC or DMA disabled, can't receive */
    if (!(rxdma_cfg & RXDMA_CFG_ENABLE) ||
        !(rxmac_cfg & MAC_RXCFG_ENAB)) {
        trace_sungem_rx_disabled();
        return 0;
    }

    /* Size adjustment for FCS */
    if (rxmac_cfg & MAC_RXCFG_SFCS) {
        fcs_size = 0;
    } else {
        fcs_size = 4;
    }

    /* Discard frame smaller than a MAC or larger than max frame size
     * (when accounting for FCS)
     */
    if (size < 6 || (size + 4) > max_fsize) {
        trace_sungem_rx_bad_frame_size(size);
        /* XXX Increment error statistics ? */
        return size;
    }

    /* We don't drop too small frames since we get them in qemu, we pad
     * them instead. We should probably use the min frame size register
     * but I don't want to use a variable size staging buffer and I
     * know both MacOS and Linux use the default 64 anyway. We use 60
     * here to account for the non-existent FCS.
     */
    if (size < 60) {
        memcpy(smallbuf, buf, size);
        memset(&smallbuf[size], 0, 60 - size);
        buf = smallbuf;
        size = 60;
    }

    /* Get MAC crc */
    mac_crc = net_crc32_le(buf, ETH_ALEN);

    /* Packet isn't for me ? */
    rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
    if (rx_cond == rx_no_match) {
        /* Just drop it */
        trace_sungem_rx_unmatched();
        return size;
    }

    /* Get ring pointers */
    kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask;
    done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask;

    trace_sungem_rx_process(done, kick, s->rx_mask + 1);

    /* Ring full ? Can't receive */
    if (sungem_rx_full(s, kick, done)) {
        trace_sungem_rx_ringfull();
        return 0;
    }

    /* Note: The real GEM will fetch descriptors in blocks of 4,
     * for now we handle them one at a time, I think the driver will
     * cope
     */

    dbase = s->rxdmaregs[RXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2];

    /* Read the next descriptor */
    pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    trace_sungem_rx_desc(le64_to_cpu(desc.status_word),
                         le64_to_cpu(desc.buffer));

    /* Effective buffer address */
    baddr = le64_to_cpu(desc.buffer) & ~7ull;
    baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10;

    /* Write buffer out */
    pci_dma_write(d, baddr, buf, size);

    if (fcs_size) {
        /* Should we add an FCS ? Linux doesn't ask us to strip it,
         * however I believe nothing checks it... For now we just
         * do nothing. It's faster this way.
         */
    }

    /* Calculate the checksum */
    coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13;
    csum = net_raw_checksum((uint8_t *)buf + coff, size - coff);

    /* Build the updated descriptor */
    desc.status_word = (size + fcs_size) << 16;
    desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44;
    desc.status_word |= csum;
    if (rx_cond == rx_match_mcast) {
        desc.status_word |= RXDCTRL_HPASS;
    }
    if (rx_cond == rx_match_altmac) {
        desc.status_word |= RXDCTRL_ALTMAC;
    }
    desc.status_word = cpu_to_le64(desc.status_word);

    pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    done = (done + 1) & s->rx_mask;
    s->rxdmaregs[RXDMA_DONE >> 2] = done;

    /* XXX Unconditionally set RX interrupt for now. The interrupt
     * mitigation timer might well end up adding more overhead than
     * helping here...
     */
    ints = GREG_STAT_RXDONE;
    if (sungem_rx_full(s, kick, done)) {
        ints |= GREG_STAT_RXNOBUF;
    }
    sungem_update_status(s, ints, true);

    return size;
}

static void sungem_set_link_status(NetClientState *nc)
{
    /* We don't do anything for now as I believe none of the OSes
     * drivers use the MIF autopoll feature nor the PHY interrupt
     */
}

static void sungem_update_masks(SunGEMState *s)
{
    uint32_t sz;

    sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5);
    s->rx_mask = sz - 1;

    sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5);
    s->tx_mask = sz - 1;
}

static void sungem_reset_rx(SunGEMState *s)
{
    trace_sungem_rx_reset();

    /* XXX Do RXCFG */
    /* XXX Check value */
    s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140;
    s->rxdmaregs[RXDMA_DONE >> 2] = 0;
    s->rxdmaregs[RXDMA_KICK >> 2] = 0;
    s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010;
    s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8;
    s->rxdmaregs[RXDMA_BLANK >> 2] = 0;

    sungem_update_masks(s);
}

static void sungem_reset_tx(SunGEMState *s)
{
    trace_sungem_tx_reset();

    /* XXX Do TXCFG */
    /* XXX Check value */
    s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
    s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
    s->txdmaregs[TXDMA_KICK >> 2] = 0;
    s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;

    sungem_update_masks(s);

    s->tx_size = 0;
    s->tx_first_ctl = 0;
}

static void sungem_reset_all(SunGEMState *s, bool pci_reset)
{
    trace_sungem_reset(pci_reset);

    sungem_reset_rx(s);
    sungem_reset_tx(s);

    s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF;
    s->gregs[GREG_STAT >> 2] = 0;
    if (pci_reset) {
        uint8_t *ma = s->conf.macaddr.a;

        s->gregs[GREG_SWRST >> 2] = 0;
        s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5];
        s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3];
        s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1];
    } else {
        s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT;
    }
    s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0;
}

static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr,
                             uint8_t reg_addr, uint16_t val)
{
    trace_sungem_mii_write(phy_addr, reg_addr, val);

    /* XXX TODO */
}

static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                  uint8_t reg_addr)
{
    if (phy_addr != s->phy_addr) {
        return 0xffff;
    }
    /* Primitive emulation of a BCM5201 to please the driver,
     * ID is 0x00406210. TODO: Do a gigabit PHY like BCM5400
     */
    switch (reg_addr) {
    case MII_BMCR:
        return 0;
    case MII_PHYID1:
        return 0x0040;
    case MII_PHYID2:
        return 0x6210;
    case MII_BMSR:
        if (qemu_get_queue(s->nic)->link_down) {
            return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
        } else {
            return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
                   MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        }
    case MII_ANLPAR:
    case MII_ANAR:
        return MII_ANLPAR_TXFD;
    case 0x18: /* 5201 AUX status */
        return 3; /* 100FD */
    default:
        return 0;
    };
}

static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                uint8_t reg_addr)
{
    uint16_t val;

    val = __sungem_mii_read(s, phy_addr, reg_addr);

    trace_sungem_mii_read(phy_addr, reg_addr, val);

    return val;
}

static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val)
{
    uint8_t phy_addr, reg_addr, op;

    /* Ignore not start of frame */
    if ((val >> 30) != 1) {
        trace_sungem_mii_invalid_sof(val >> 30);
        return 0xffff;
    }
    phy_addr = (val & MIF_FRAME_PHYAD) >> 23;
    reg_addr = (val & MIF_FRAME_REGAD) >> 18;
    op = (val & MIF_FRAME_OP) >> 28;
    switch (op) {
    case 1:
        sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA);
        return val | MIF_FRAME_TALSB;
    case 2:
        return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB;
    default:
        trace_sungem_mii_invalid_op(op);
    }
    return 0xffff | MIF_FRAME_TALSB;
}

static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
                                   unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_greg_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case GREG_SEBSTATE:
    case GREG_STAT:
    case GREG_STAT2:
    case GREG_PCIESTAT:
        return; /* No actual write */
    case GREG_IACK:
        val &= GREG_STAT_LATCH;
        s->gregs[GREG_STAT >> 2] &= ~val;
        sungem_eval_irq(s);
        return; /* No actual write */
    case GREG_PCIEMASK:
        val &= 0x7;
        break;
    }

    s->gregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case GREG_IMASK:
        /* Re-evaluate interrupt */
        sungem_eval_irq(s);
        break;
    case GREG_SWRST:
        switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
        case GREG_SWRST_RXRST:
            sungem_reset_rx(s);
            break;
        case GREG_SWRST_TXRST:
            sungem_reset_tx(s);
            break;
        case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
            sungem_reset_all(s, false);
        }
        break;
    }
}

static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->gregs[addr >> 2];

    trace_sungem_mmio_greg_read(addr, val);

    switch (addr) {
    case GREG_STAT:
        /* Side effect, clear bottom 7 bits */
        s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH;
        sungem_eval_irq(s);

        /* Inject TX completion in returned value */
        val = (val & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    case GREG_STAT2:
        /* Return the status reg without side effect
         * (and inject TX completion in returned value)
         */
        val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_greg_ops = {
    .read = sungem_mmio_greg_read,
    .write = sungem_mmio_greg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_txdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case TXDMA_TXDONE:
    case TXDMA_PCNT:
    case TXDMA_SMACHINE:
    case TXDMA_DPLOW:
    case TXDMA_DPHI:
    case TXDMA_FSZ:
    case TXDMA_FTAG:
        return; /* No actual write */
    }

    s->txdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case TXDMA_KICK:
        sungem_tx_kick(s);
        break;
    case TXDMA_CFG:
        sungem_update_masks(s);
        break;
    }
}

static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->txdmaregs[addr >> 2];

    trace_sungem_mmio_txdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_txdma_ops = {
    .read = sungem_mmio_txdma_read,
    .write = sungem_mmio_txdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_rxdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case RXDMA_DONE:
    case RXDMA_PCNT:
    case RXDMA_SMACHINE:
    case RXDMA_DPLOW:
    case RXDMA_DPHI:
    case RXDMA_FSZ:
    case RXDMA_FTAG:
        return; /* No actual write */
    }

    s->rxdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case RXDMA_KICK:
        trace_sungem_rx_kick(val);
        break;
    case RXDMA_CFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->rxdmaregs[addr >> 2];

    trace_sungem_mmio_rxdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_rxdma_ops = {
    .read = sungem_mmio_rxdma_read,
    .write = sungem_mmio_rxdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mac_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MAC_TXRST: /* Not technically read-only but will do for now */
    case MAC_RXRST: /* Not technically read-only but will do for now */
    case MAC_TXSTAT:
    case MAC_RXSTAT:
    case MAC_CSTAT:
    case MAC_PATMPS:
    case MAC_SMACHINE:
        return; /* No actual write */
    case MAC_MINFSZ:
        /* 10-bits implemented */
        val &= 0x3ff;
        break;
    }

    s->macregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MAC_TXMASK:
    case MAC_RXMASK:
    case MAC_MCMASK:
        sungem_eval_cascade_irq(s);
        break;
    case MAC_RXCFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->macregs[addr >> 2];

    trace_sungem_mmio_mac_read(addr, val);

    switch (addr) {
    case MAC_TXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_TXMAC, false);
        break;
    case MAC_RXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_RXMAC, false);
        break;
    case MAC_CSTAT:
        /* Side effect, interrupt bits */
        s->macregs[addr >> 2] &= MAC_CSTAT_PTR;
        sungem_update_status(s, GREG_STAT_MAC, false);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_mac_ops = {
    .read = sungem_mmio_mac_read,
    .write = sungem_mmio_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mif_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MIF_STATUS:
    case MIF_SMACHINE:
        return; /* No actual write */
    case MIF_CFG:
        /* Maintain the RO MDI bits to advertize an MDIO PHY on MDI0 */
        val &= ~MIF_CFG_MDI1;
        val |= MIF_CFG_MDI0;
        break;
    }

    s->mifregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MIF_FRAME:
        s->mifregs[addr >> 2] = sungem_mii_op(s, val);
        break;
    }
}

static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->mifregs[addr >> 2];

    trace_sungem_mmio_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_mif_ops = {
    .read = sungem_mmio_mif_read,
    .write = sungem_mmio_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_pcs_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case PCS_MIISTAT:
    case PCS_ISTAT:
    case PCS_SSTATE:
        return; /* No actual write */
    }

    s->pcsregs[addr >> 2] = val;
}

static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->pcsregs[addr >> 2];

    trace_sungem_mmio_pcs_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_pcs_ops = {
    .read = sungem_mmio_pcs_read,
    .write = sungem_mmio_pcs_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_uninit(PCIDevice *dev)
{
    SunGEMState *s = SUNGEM(dev);

    qemu_del_nic(s->nic);
}

static NetClientInfo net_sungem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sungem_can_receive,
    .receive = sungem_receive,
    .link_status_changed = sungem_set_link_status,
};

static void sungem_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    SunGEMState *s = SUNGEM(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;

    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK |
                 PCI_STATUS_DEVSEL_MEDIUM |
                 PCI_STATUS_66MHZ);

    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
    pci_conf[PCI_MIN_GNT] = 0x40;
    pci_conf[PCI_MAX_LAT] = 0x40;

    sungem_reset_all(s, true);
    memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE);

    memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s,
                          "sungem.greg", SUNGEM_MMIO_GREG_SIZE);
    memory_region_add_subregion(&s->sungem, 0, &s->greg);

    memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s,
                          "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma);

    memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s,
                          "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma);

    memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s,
                          "sungem.mac", SUNGEM_MMIO_MAC_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6000, &s->mac);

    memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s,
                          "sungem.mif", SUNGEM_MMIO_MIF_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6200, &s->mif);

    memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s,
                          "sungem.pcs", SUNGEM_MMIO_PCS_SIZE);
    memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sungem_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic),
                             s->conf.macaddr.a);
}

static void sungem_reset(DeviceState *dev)
{
    SunGEMState *s = SUNGEM(dev);

    sungem_reset_all(s, true);
}

static void sungem_instance_init(Object *obj)
{
    SunGEMState *s = SUNGEM(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static Property sungem_properties[] = {
    DEFINE_NIC_PROPERTIES(SunGEMState, conf),
    /* Phy address should be 0 for most Apple machines except
     * for K2 in which case it's 1. Will be set by a machine
     * override.
     */
    DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_sungem = {
    .name = "sungem",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, SunGEMState),
        VMSTATE_MACADDR(conf.macaddr, SunGEMState),
        VMSTATE_UINT32(phy_addr, SunGEMState),
        VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState,
                             (SUNGEM_MMIO_TXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState,
                             (SUNGEM_MMIO_RXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)),
        VMSTATE_UINT32(rx_mask, SunGEMState),
        VMSTATE_UINT32(tx_mask, SunGEMState),
        VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE),
        VMSTATE_UINT32(tx_size, SunGEMState),
        VMSTATE_UINT64(tx_first_ctl, SunGEMState),
        VMSTATE_END_OF_LIST()
    }
};

static void sungem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sungem_realize;
    k->exit = sungem_uninit;
    k->vendor_id = PCI_VENDOR_ID_APPLE;
    k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC;
    k->revision = 0x01;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_sungem;
    dc->reset = sungem_reset;
    device_class_set_props(dc, sungem_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sungem_info = {
    .name = TYPE_SUNGEM,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(SunGEMState),
    .class_init = sungem_class_init,
    .instance_init = sungem_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sungem_register_types(void)
{
    type_register_static(&sungem_info);
}

type_init(sungem_register_types)