/*
 * Nuvoton NPCM7xx EMC Module
 *
 * Copyright 2020 Google LLC
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Unsupported/unimplemented features:
 * - MCMDR.FDUP (full duplex) is ignored, half duplex is not supported
 * - Only CAM0 is supported, CAM[1-15] are not
 *   - writes to CAMEN.[1-15] are ignored, these bits always read as zeroes
 * - MII is not implemented, MIIDA.BUSY and MIID always return zero
 * - MCMDR.LBK is not implemented
 * - MCMDR.{OPMOD,ENSQE,AEP,ARP} are not supported
 * - H/W FIFOs are not supported, MCMDR.FFTCR is ignored
 * - MGSTA.SQE is not supported
 * - pause and control frames are not implemented
 * - MGSTA.CCNT is not supported
 * - MPCNT, DMARFS are not implemented
 */

#include "qemu/osdep.h"

/* For crc32 */
#include <zlib.h>

#include "qemu-common.h"
#include "hw/irq.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
#include "hw/net/npcm7xx_emc.h"
#include "net/eth.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "sysemu/dma.h"
#include "trace.h"

#define CRC_LENGTH 4

/*
 * The maximum size of a (layer 2) ethernet frame as defined by 802.3.
 * 1518 = 6(dest macaddr) + 6(src macaddr) + 2(proto) + 4(crc) + 1500(payload)
 * This does not include an additional 4 for the vlan field (802.1q).
 */
#define MAX_ETH_FRAME_SIZE 1518

static const char *emc_reg_name(int regno)
{
#define REG(name) case REG_ ## name: return #name;
    switch (regno) {
    REG(CAMCMR)
    REG(CAMEN)
    REG(TXDLSA)
    REG(RXDLSA)
    REG(MCMDR)
    REG(MIID)
    REG(MIIDA)
    REG(FFTCR)
    REG(TSDR)
    REG(RSDR)
    REG(DMARFC)
    REG(MIEN)
    REG(MISTA)
    REG(MGSTA)
    REG(MPCNT)
    REG(MRPC)
    REG(MRPCC)
    REG(MREPC)
    REG(DMARFS)
    REG(CTXDSA)
    REG(CTXBSA)
    REG(CRXDSA)
    REG(CRXBSA)
    case REG_CAMM_BASE + 0: return "CAM0M";
    case REG_CAML_BASE + 0: return "CAM0L";
    case REG_CAMM_BASE + 2 ... REG_CAMML_LAST:
        /* Only CAM0 is supported, fold the others into something simple. */
        if (regno & 1) {
            return "CAM<n>L";
        } else {
            return "CAM<n>M";
        }
    default: return "UNKNOWN";
    }
#undef REG
}

static void emc_reset(NPCM7xxEMCState *emc)
{
    trace_npcm7xx_emc_reset(emc->emc_num);

    memset(&emc->regs[0], 0, sizeof(emc->regs));

    /* These regs have non-zero reset values. */
    emc->regs[REG_TXDLSA] = 0xfffffffc;
    emc->regs[REG_RXDLSA] = 0xfffffffc;
    emc->regs[REG_MIIDA] = 0x00900000;
    emc->regs[REG_FFTCR] = 0x0101;
    emc->regs[REG_DMARFC] = 0x0800;
    emc->regs[REG_MPCNT] = 0x7fff;

    emc->tx_active = false;
    emc->rx_active = false;
}

static void npcm7xx_emc_reset(DeviceState *dev)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
    emc_reset(emc);
}

static void emc_soft_reset(NPCM7xxEMCState *emc)
{
    /*
     * The docs say at least MCMDR.{LBK,OPMOD} bits are not changed during a
     * soft reset, but do not go into further detail. For now, KISS.
     */
    uint32_t mcmdr = emc->regs[REG_MCMDR];
    emc_reset(emc);
    emc->regs[REG_MCMDR] = mcmdr & (REG_MCMDR_LBK | REG_MCMDR_OPMOD);

    qemu_set_irq(emc->tx_irq, 0);
    qemu_set_irq(emc->rx_irq, 0);
}

static void emc_set_link(NetClientState *nc)
{
    /* Nothing to do yet. */
}

/* MISTA.TXINTR is the union of the individual bits with their enables. */
static void emc_update_mista_txintr(NPCM7xxEMCState *emc)
{
    /* Only look at the bits we support. */
    uint32_t mask = (REG_MISTA_TXBERR |
                     REG_MISTA_TDU |
                     REG_MISTA_TXCP);
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
        emc->regs[REG_MISTA] |= REG_MISTA_TXINTR;
    } else {
        emc->regs[REG_MISTA] &= ~REG_MISTA_TXINTR;
    }
}

/* MISTA.RXINTR is the union of the individual bits with their enables. */
static void emc_update_mista_rxintr(NPCM7xxEMCState *emc)
{
    /* Only look at the bits we support. */
    uint32_t mask = (REG_MISTA_RXBERR |
                     REG_MISTA_RDU |
                     REG_MISTA_RXGD);
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
        emc->regs[REG_MISTA] |= REG_MISTA_RXINTR;
    } else {
        emc->regs[REG_MISTA] &= ~REG_MISTA_RXINTR;
    }
}

/* N.B. emc_update_mista_txintr must have already been called. */
static void emc_update_tx_irq(NPCM7xxEMCState *emc)
{
    int level = !!(emc->regs[REG_MISTA] &
                   emc->regs[REG_MIEN] &
                   REG_MISTA_TXINTR);
    trace_npcm7xx_emc_update_tx_irq(level);
    qemu_set_irq(emc->tx_irq, level);
}

/* N.B. emc_update_mista_rxintr must have already been called. */
static void emc_update_rx_irq(NPCM7xxEMCState *emc)
{
    int level = !!(emc->regs[REG_MISTA] &
                   emc->regs[REG_MIEN] &
                   REG_MISTA_RXINTR);
    trace_npcm7xx_emc_update_rx_irq(level);
    qemu_set_irq(emc->rx_irq, level);
}

/* Update IRQ states due to changes in MIEN,MISTA. */
static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc)
{
    emc_update_mista_txintr(emc);
    emc_update_tx_irq(emc);

    emc_update_mista_rxintr(emc);
    emc_update_rx_irq(emc);
}

static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->flags = le32_to_cpu(desc->flags);
    desc->txbsa = le32_to_cpu(desc->txbsa);
    desc->status_and_length = le32_to_cpu(desc->status_and_length);
    desc->ntxdsa = le32_to_cpu(desc->ntxdsa);
    return 0;
}

static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr)
{
    NPCM7xxEMCTxDesc le_desc;

    le_desc.flags = cpu_to_le32(desc->flags);
    le_desc.txbsa = cpu_to_le32(desc->txbsa);
    le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
    le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}

static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->status_and_length = le32_to_cpu(desc->status_and_length);
    desc->rxbsa = le32_to_cpu(desc->rxbsa);
    desc->reserved = le32_to_cpu(desc->reserved);
    desc->nrxdsa = le32_to_cpu(desc->nrxdsa);
    return 0;
}

static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr)
{
    NPCM7xxEMCRxDesc le_desc;

    le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
    le_desc.rxbsa = cpu_to_le32(desc->rxbsa);
    le_desc.reserved = cpu_to_le32(desc->reserved);
    le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}

static void emc_set_mista(NPCM7xxEMCState *emc, uint32_t flags)
{
    trace_npcm7xx_emc_set_mista(flags);
    emc->regs[REG_MISTA] |= flags;
    if (extract32(flags, 16, 16)) {
        emc_update_mista_txintr(emc);
    }
    if (extract32(flags, 0, 16)) {
        emc_update_mista_rxintr(emc);
    }
}

static void emc_halt_tx(NPCM7xxEMCState *emc, uint32_t mista_flag)
{
    emc->tx_active = false;
    emc_set_mista(emc, mista_flag);
}

static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag)
{
    emc->rx_active = false;
    emc_set_mista(emc, mista_flag);
}

static void emc_enable_rx_and_flush(NPCM7xxEMCState *emc)
{
    emc->rx_active = true;
    qemu_flush_queued_packets(qemu_get_queue(emc->nic));
}

static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc,
                                       const NPCM7xxEMCTxDesc *tx_desc,
                                       uint32_t desc_addr)
{
    /* Update the current descriptor, if only to reset the owner flag. */
    if (emc_write_tx_desc(tx_desc, desc_addr)) {
        /*
         * We just read it so this shouldn't generally happen.
         * Error already reported.
         */
        emc_set_mista(emc, REG_MISTA_TXBERR);
    }
    emc->regs[REG_CTXDSA] = TX_DESC_NTXDSA(tx_desc->ntxdsa);
}

static void emc_set_next_rx_descriptor(NPCM7xxEMCState *emc,
                                       const NPCM7xxEMCRxDesc *rx_desc,
                                       uint32_t desc_addr)
{
    /* Update the current descriptor, if only to reset the owner flag. */
    if (emc_write_rx_desc(rx_desc, desc_addr)) {
        /*
         * We just read it so this shouldn't generally happen.
         * Error already reported.
         */
        emc_set_mista(emc, REG_MISTA_RXBERR);
    }
    emc->regs[REG_CRXDSA] = RX_DESC_NRXDSA(rx_desc->nrxdsa);
}

static void emc_try_send_next_packet(NPCM7xxEMCState *emc)
{
    /* Working buffer for sending out packets. Most packets fit in this. */
#define TX_BUFFER_SIZE 2048
    uint8_t tx_send_buffer[TX_BUFFER_SIZE];
    uint32_t desc_addr = TX_DESC_NTXDSA(emc->regs[REG_CTXDSA]);
    NPCM7xxEMCTxDesc tx_desc;
    uint32_t next_buf_addr, length;
    uint8_t *buf;
    g_autofree uint8_t *malloced_buf = NULL;

    if (emc_read_tx_desc(desc_addr, &tx_desc)) {
        /* Error reading descriptor, already reported. */
        emc_halt_tx(emc, REG_MISTA_TXBERR);
        emc_update_tx_irq(emc);
        return;
    }

    /* Nothing we can do if we don't own the descriptor. */
    if (!(tx_desc.flags & TX_DESC_FLAG_OWNER_MASK)) {
        trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
        emc_halt_tx(emc, REG_MISTA_TDU);
        emc_update_tx_irq(emc);
        return;
    }

    /* Give the descriptor back regardless of what happens. */
    tx_desc.flags &= ~TX_DESC_FLAG_OWNER_MASK;
    tx_desc.status_and_length &= 0xffff;

    /*
     * Despite the h/w documentation saying the tx buffer is word aligned,
     * the linux driver does not word align the buffer. There is value in not
     * aligning the buffer: See the description of NET_IP_ALIGN in linux
     * kernel sources.
     */
    next_buf_addr = tx_desc.txbsa;
    emc->regs[REG_CTXBSA] = next_buf_addr;
    length = TX_DESC_PKT_LEN(tx_desc.status_and_length);
    buf = &tx_send_buffer[0];

    if (length > sizeof(tx_send_buffer)) {
        malloced_buf = g_malloc(length);
        buf = malloced_buf;
    }

    if (dma_memory_read(&address_space_memory, next_buf_addr, buf,
                        length, MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
                      __func__, next_buf_addr);
        emc_set_mista(emc, REG_MISTA_TXBERR);
        emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
        emc_update_tx_irq(emc);
        trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
        return;
    }

    if ((tx_desc.flags & TX_DESC_FLAG_PADEN) && (length < MIN_PACKET_LENGTH)) {
        memset(buf + length, 0, MIN_PACKET_LENGTH - length);
        length = MIN_PACKET_LENGTH;
    }

    /* N.B. emc_receive can get called here. */
    qemu_send_packet(qemu_get_queue(emc->nic), buf, length);
    trace_npcm7xx_emc_sent_packet(length);

    tx_desc.status_and_length |= TX_DESC_STATUS_TXCP;
    if (tx_desc.flags & TX_DESC_FLAG_INTEN) {
        emc_set_mista(emc, REG_MISTA_TXCP);
    }
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_TXINTR) {
        tx_desc.status_and_length |= TX_DESC_STATUS_TXINTR;
    }

    emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
    emc_update_tx_irq(emc);
    trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
}

static bool emc_can_receive(NetClientState *nc)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));

    bool can_receive = emc->rx_active;
    trace_npcm7xx_emc_can_receive(can_receive);
    return can_receive;
}

/* If result is false then *fail_reason contains the reason. */
static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf,
                                size_t len, const char **fail_reason)
{
    eth_pkt_types_e pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(buf));

    switch (pkt_type) {
    case ETH_PKT_BCAST:
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            return true;
        } else {
            *fail_reason = "Broadcast packet disabled";
            return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_ABP);
        }
    case ETH_PKT_MCAST:
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            return true;
        } else {
            *fail_reason = "Multicast packet disabled";
            return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_AMP);
        }
    case ETH_PKT_UCAST: {
        bool matches;
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) {
            return true;
        }
        matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) &&
                   /* We only support one CAM register, CAM0. */
                   (emc->regs[REG_CAMEN] & (1 << 0)) &&
                   memcmp(buf, emc->conf.macaddr.a, ETH_ALEN) == 0);
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            *fail_reason = "MACADDR matched, comparison complemented";
            return !matches;
        } else {
            *fail_reason = "MACADDR didn't match";
            return matches;
        }
    }
    default:
        g_assert_not_reached();
    }
}

static bool emc_receive_filter(NPCM7xxEMCState *emc, const uint8_t *buf,
                               size_t len)
{
    const char *fail_reason = NULL;
    bool ok = emc_receive_filter1(emc, buf, len, &fail_reason);
    if (!ok) {
        trace_npcm7xx_emc_packet_filtered_out(fail_reason);
    }
    return ok;
}

static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));
    const uint32_t len = len1;
    size_t max_frame_len;
    bool long_frame;
    uint32_t desc_addr;
    NPCM7xxEMCRxDesc rx_desc;
    uint32_t crc;
    uint8_t *crc_ptr;
    uint32_t buf_addr;

    trace_npcm7xx_emc_receiving_packet(len);

    if (!emc_can_receive(nc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Unexpected packet\n", __func__);
        return -1;
    }

    if (len < ETH_HLEN ||
        /* Defensive programming: drop unsupportable large packets. */
        len > 0xffff - CRC_LENGTH) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Dropped frame of %u bytes\n",
                      __func__, len);
        return len;
    }

    /*
     * DENI is set if EMC received the Length/Type field of the incoming
     * packet, so it will be set regardless of what happens next.
     */
    emc_set_mista(emc, REG_MISTA_DENI);

    if (!emc_receive_filter(emc, buf, len)) {
        emc_update_rx_irq(emc);
        return len;
    }

    /* Huge frames (> DMARFC) are dropped. */
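    /*
     * Note: DMARFC.RXMS is compared against the on-wire length including the
     * 4-byte CRC, so a frame whose total length equals RXMS is still accepted.
     */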
    max_frame_len = REG_DMARFC_RXMS(emc->regs[REG_DMARFC]);
    if (len + CRC_LENGTH > max_frame_len) {
        trace_npcm7xx_emc_packet_dropped(len);
        emc_set_mista(emc, REG_MISTA_DFOI);
        emc_update_rx_irq(emc);
        return len;
    }

    /*
     * Long Frames (> MAX_ETH_FRAME_SIZE) are also dropped, unless MCMDR.ALP
     * is set.
     */
    long_frame = false;
    if (len + CRC_LENGTH > MAX_ETH_FRAME_SIZE) {
        if (emc->regs[REG_MCMDR] & REG_MCMDR_ALP) {
            long_frame = true;
        } else {
            trace_npcm7xx_emc_packet_dropped(len);
            emc_set_mista(emc, REG_MISTA_PTLE);
            emc_update_rx_irq(emc);
            return len;
        }
    }

    desc_addr = RX_DESC_NRXDSA(emc->regs[REG_CRXDSA]);
    if (emc_read_rx_desc(desc_addr, &rx_desc)) {
        /* Error reading descriptor, already reported. */
        emc_halt_rx(emc, REG_MISTA_RXBERR);
        emc_update_rx_irq(emc);
        return len;
    }

    /* Nothing we can do if we don't own the descriptor. */
    if (!(rx_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK)) {
        trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
        emc_halt_rx(emc, REG_MISTA_RDU);
        emc_update_rx_irq(emc);
        return len;
    }

    crc = 0;
    crc_ptr = (uint8_t *) &crc;
    if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
        crc = cpu_to_be32(crc32(~0, buf, len));
    }

    /* Give the descriptor back regardless of what happens. */
    rx_desc.status_and_length &= ~RX_DESC_STATUS_OWNER_MASK;

    buf_addr = rx_desc.rxbsa;
    emc->regs[REG_CRXBSA] = buf_addr;
    if (dma_memory_write(&address_space_memory, buf_addr, buf,
                         len, MEMTXATTRS_UNSPECIFIED) ||
        (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) &&
         dma_memory_write(&address_space_memory, buf_addr + len,
                          crc_ptr, 4, MEMTXATTRS_UNSPECIFIED))) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n",
                      __func__);
        emc_set_mista(emc, REG_MISTA_RXBERR);
        emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
        emc_update_rx_irq(emc);
        trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
        return len;
    }

    trace_npcm7xx_emc_received_packet(len);

    /* Note: We've already verified len+4 <= 0xffff. */
    rx_desc.status_and_length = len;
    if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
        rx_desc.status_and_length += 4;
    }
    rx_desc.status_and_length |= RX_DESC_STATUS_RXGD;
    emc_set_mista(emc, REG_MISTA_RXGD);

    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_RXINTR) {
        rx_desc.status_and_length |= RX_DESC_STATUS_RXINTR;
    }
    if (long_frame) {
        rx_desc.status_and_length |= RX_DESC_STATUS_PTLE;
    }

    emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
    emc_update_rx_irq(emc);
    trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
    return len;
}

static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size)
{
    NPCM7xxEMCState *emc = opaque;
    uint32_t reg = offset / sizeof(uint32_t);
    uint32_t result;

    if (reg >= NPCM7XX_NUM_EMC_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
                      __func__, offset);
        return 0;
    }

    switch (reg) {
    case REG_MIID:
        /*
         * We don't implement MII. For determinism, reads always return zero;
         * writes still record the last value written, for debugging purposes.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Read of MIID, returning 0\n", __func__);
        result = 0;
        break;
    case REG_TSDR:
    case REG_RSDR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Read of write-only reg, %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        return 0;
    default:
        result = emc->regs[reg];
        break;
    }

    trace_npcm7xx_emc_reg_read(emc->emc_num, result, emc_reg_name(reg), reg);
    return result;
}

static void npcm7xx_emc_write(void *opaque, hwaddr offset,
                              uint64_t v, unsigned size)
{
    NPCM7xxEMCState *emc = opaque;
    uint32_t reg = offset / sizeof(uint32_t);
    uint32_t value = v;

    g_assert(size == sizeof(uint32_t));

    if (reg >= NPCM7XX_NUM_EMC_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
                      __func__, offset);
        return;
    }

    trace_npcm7xx_emc_reg_write(emc->emc_num, emc_reg_name(reg), reg, value);

    switch (reg) {
    case REG_CAMCMR:
        emc->regs[reg] = value;
        break;
    case REG_CAMEN:
        /* Only CAM0 is supported, don't pretend otherwise. */
        if (value & ~1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Only CAM0 is supported, cannot enable others"
                          ": 0x%x\n",
                          __func__, value);
        }
        emc->regs[reg] = value & 1;
        break;
    case REG_CAMM_BASE + 0:
        emc->regs[reg] = value;
        emc->conf.macaddr.a[0] = value >> 24;
        emc->conf.macaddr.a[1] = value >> 16;
        emc->conf.macaddr.a[2] = value >> 8;
        emc->conf.macaddr.a[3] = value >> 0;
        break;
    case REG_CAML_BASE + 0:
        emc->regs[reg] = value;
        emc->conf.macaddr.a[4] = value >> 24;
        emc->conf.macaddr.a[5] = value >> 16;
        break;
    case REG_MCMDR: {
        uint32_t prev;
        if (value & REG_MCMDR_SWR) {
            emc_soft_reset(emc);
            /* On h/w the reset happens over multiple cycles. For now KISS. */
            break;
        }
        prev = emc->regs[reg];
        emc->regs[reg] = value;
        /* Update tx state. */
        if (!(prev & REG_MCMDR_TXON) &&
            (value & REG_MCMDR_TXON)) {
            emc->regs[REG_CTXDSA] = emc->regs[REG_TXDLSA];
            /*
             * The Linux kernel turns TX on with the CPU still holding the
             * descriptor, which suggests we should wait for a write to TSDR
             * before trying to send a packet: so we don't send one here.
             */
        } else if ((prev & REG_MCMDR_TXON) &&
                   !(value & REG_MCMDR_TXON)) {
            emc->regs[REG_MGSTA] |= REG_MGSTA_TXHA;
        }
        if (!(value & REG_MCMDR_TXON)) {
            emc_halt_tx(emc, 0);
        }
        /* Update rx state. */
        if (!(prev & REG_MCMDR_RXON) &&
            (value & REG_MCMDR_RXON)) {
            emc->regs[REG_CRXDSA] = emc->regs[REG_RXDLSA];
        } else if ((prev & REG_MCMDR_RXON) &&
                   !(value & REG_MCMDR_RXON)) {
            emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA;
        }
        if (value & REG_MCMDR_RXON) {
            emc_enable_rx_and_flush(emc);
        } else {
            emc_halt_rx(emc, 0);
        }
        break;
    }
    case REG_TXDLSA:
    case REG_RXDLSA:
    case REG_DMARFC:
    case REG_MIID:
        emc->regs[reg] = value;
        break;
    case REG_MIEN:
        emc->regs[reg] = value;
        emc_update_irq_from_reg_change(emc);
        break;
    case REG_MISTA:
        /* Clear the bits that have 1 in "value". */
        emc->regs[reg] &= ~value;
        emc_update_irq_from_reg_change(emc);
        break;
    case REG_MGSTA:
        /* Clear the bits that have 1 in "value". */
        emc->regs[reg] &= ~value;
        break;
    case REG_TSDR:
        if (emc->regs[REG_MCMDR] & REG_MCMDR_TXON) {
            emc->tx_active = true;
            /* Keep trying to send packets until we run out. */
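            /*
             * emc_try_send_next_packet() clears tx_active (via emc_halt_tx)
             * on a CPU-owned descriptor or a bus error, so this loop
             * terminates once the guest-owned descriptors are exhausted.
             */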
            while (emc->tx_active) {
                emc_try_send_next_packet(emc);
            }
        }
        break;
    case REG_RSDR:
        if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) {
            emc_enable_rx_and_flush(emc);
        }
        break;
    case REG_MIIDA:
        emc->regs[reg] = value & ~REG_MIIDA_BUSY;
        break;
    case REG_MRPC:
    case REG_MRPCC:
    case REG_MREPC:
    case REG_CTXDSA:
    case REG_CTXBSA:
    case REG_CRXDSA:
    case REG_CRXBSA:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Write to read-only reg %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: Write to unimplemented reg %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        break;
    }
}

static const struct MemoryRegionOps npcm7xx_emc_ops = {
    .read = npcm7xx_emc_read,
    .write = npcm7xx_emc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static void emc_cleanup(NetClientState *nc)
{
    /* Nothing to do yet. */
}

static NetClientInfo net_npcm7xx_emc_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = emc_can_receive,
    .receive = emc_receive,
    .cleanup = emc_cleanup,
    .link_status_changed = emc_set_link,
};

static void npcm7xx_emc_realize(DeviceState *dev, Error **errp)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(emc);

    memory_region_init_io(&emc->iomem, OBJECT(emc), &npcm7xx_emc_ops, emc,
                          TYPE_NPCM7XX_EMC, 4 * KiB);
    sysbus_init_mmio(sbd, &emc->iomem);
    sysbus_init_irq(sbd, &emc->tx_irq);
    sysbus_init_irq(sbd, &emc->rx_irq);

    qemu_macaddr_default_if_unset(&emc->conf.macaddr);
    emc->nic = qemu_new_nic(&net_npcm7xx_emc_info, &emc->conf,
                            object_get_typename(OBJECT(dev)), dev->id, emc);
    qemu_format_nic_info_str(qemu_get_queue(emc->nic), emc->conf.macaddr.a);
}

static void npcm7xx_emc_unrealize(DeviceState *dev)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);

    qemu_del_nic(emc->nic);
}

static const VMStateDescription vmstate_npcm7xx_emc = {
    .name = TYPE_NPCM7XX_EMC,
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(emc_num, NPCM7xxEMCState),
        VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS),
        VMSTATE_BOOL(tx_active, NPCM7xxEMCState),
        VMSTATE_BOOL(rx_active, NPCM7xxEMCState),
        VMSTATE_END_OF_LIST(),
    },
};

static Property npcm7xx_emc_properties[] = {
    DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void npcm7xx_emc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "NPCM7xx EMC Controller";
    dc->realize = npcm7xx_emc_realize;
    dc->unrealize = npcm7xx_emc_unrealize;
    dc->reset = npcm7xx_emc_reset;
    dc->vmsd = &vmstate_npcm7xx_emc;
    device_class_set_props(dc, npcm7xx_emc_properties);
}

static const TypeInfo npcm7xx_emc_info = {
    .name = TYPE_NPCM7XX_EMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(NPCM7xxEMCState),
    .class_init = npcm7xx_emc_class_init,
};

static void npcm7xx_emc_register_type(void)
{
    type_register_static(&npcm7xx_emc_info);
}

type_init(npcm7xx_emc_register_type)