npcm_gmac.c: comparison of a4dd7a1dad1d3d0ca270a403212ea93c801927db (old) with 1c51c571998a669712235cb35093ae817e19b3ff (new)
Identical in both revisions:

/*
 * Nuvoton NPCM7xx/8xx GMAC Module
 *
 * Copyright 2024 Google LLC
 * Authors:
 * Hao Wu <wuhaotsh@google.com>
 * Nabih Estefan <nabihestefan@google.com>
 *
--- 224 unchanged lines hidden ---
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}
Added in the new revision:

static int gmac_read_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->tdes0 = le32_to_cpu(desc->tdes0);
    desc->tdes1 = le32_to_cpu(desc->tdes1);
    desc->tdes2 = le32_to_cpu(desc->tdes2);
    desc->tdes3 = le32_to_cpu(desc->tdes3);
    return 0;
}

static int gmac_write_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc)
{
    struct NPCMGMACTxDesc le_desc;

    le_desc.tdes0 = cpu_to_le32(desc->tdes0);
    le_desc.tdes1 = cpu_to_le32(desc->tdes1);
    le_desc.tdes2 = cpu_to_le32(desc->tdes2);
    le_desc.tdes3 = cpu_to_le32(desc->tdes3);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}
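These accessors byte-swap because TX descriptors live in guest memory as four little-endian 32-bit words, independent of host endianness. The descriptor type is defined in the model's header, which this diff does not show; below is a minimal sketch of the layout the helpers assume (field names taken from the code above), plus a hypothetical gmac_give_back_tx_desc() that only illustrates how the two accessors compose.

/* Sketch of the assumed layout; the real definition lives in the header. */
struct NPCMGMACTxDesc {
    uint32_t tdes0; /* status, including the OWN bit */
    uint32_t tdes1; /* control flags and buffer sizes */
    uint32_t tdes2; /* buffer 1 address */
    uint32_t tdes3; /* buffer 2 address or next-descriptor address */
};

/* Hypothetical helper: fetch a descriptor, clear OWN, write it back. */
static int gmac_give_back_tx_desc(dma_addr_t addr)
{
    struct NPCMGMACTxDesc desc;

    if (gmac_read_tx_desc(addr, &desc)) {   /* little-endian -> host order */
        return -1;
    }
    desc.tdes0 &= ~TX_DESC_TDES0_OWN;       /* return ownership to software */
    return gmac_write_tx_desc(addr, &desc); /* host order -> little-endian */
}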
Identical in both revisions:

static int gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len,
                                            uint32_t *left_frame,
                                            uint32_t rx_buf_addr,
                                            bool *eof_transferred,
                                            const uint8_t **frame_ptr,
                                            uint16_t *transferred)
{
    uint32_t to_transfer;
--- 205 unchanged lines hidden ---
        desc_addr = rx_desc.rdes3;
    } else {
        desc_addr += sizeof(rx_desc);
    }
    gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = desc_addr;
    return len;
}
Added in the new revision:

static int gmac_tx_get_csum(uint32_t tdes1)
{
    uint32_t mask = TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(tdes1);
    int csum = 0;

    if (likely(mask > 0)) {
        csum |= CSUM_IP;
    }
    if (likely(mask > 1)) {
        csum |= CSUM_TCP | CSUM_UDP;
    }

    return csum;
}

static void gmac_try_send_next_packet(NPCMGMACState *gmac)
{
    /*
     * Step comments refer to the transmit flow on page 384 of the datasheet.
     */
    uint16_t tx_buffer_size = 2048;
    g_autofree uint8_t *tx_send_buffer = g_malloc(tx_buffer_size);
    uint32_t desc_addr;
    struct NPCMGMACTxDesc tx_desc;
    uint32_t tx_buf_addr, tx_buf_len;
    uint16_t length = 0;
    uint8_t *buf = tx_send_buffer;
    uint32_t prev_buf_size = 0;
    int csum = 0;

    /* steps 1&2 */
    if (!gmac->regs[R_NPCM_DMA_HOST_TX_DESC]) {
        gmac->regs[R_NPCM_DMA_HOST_TX_DESC] =
            NPCM_DMA_HOST_TX_DESC_MASK(gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]);
    }
    desc_addr = gmac->regs[R_NPCM_DMA_HOST_TX_DESC];

    while (true) {
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE);
        if (gmac_read_tx_desc(desc_addr, &tx_desc)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "TX Descriptor @ 0x%x can't be read\n",
                          desc_addr);
            return;
        }
        /* step 3 */

        trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path,
                                         desc_addr);
        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &tx_desc,
                                        tx_desc.tdes0, tx_desc.tdes1,
                                        tx_desc.tdes2, tx_desc.tdes3);

        /* 1 = DMA Owned, 0 = Software Owned */
        if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "TX Descriptor @ 0x%x is owned by software\n",
                          desc_addr);
            gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU;
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_TX_SUSPENDED_STATE);
            gmac_update_irq(gmac);
            return;
        }

        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_TX_RUNNING_READ_STATE);
        /* Give the descriptor back regardless of what happens. */
        tx_desc.tdes0 &= ~TX_DESC_TDES0_OWN;

        if (tx_desc.tdes1 & TX_DESC_TDES1_FIRST_SEG_MASK) {
            csum = gmac_tx_get_csum(tx_desc.tdes1);
        }

        /* step 4 */
        tx_buf_addr = tx_desc.tdes2;
        gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr;
        tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1);
        buf = &tx_send_buffer[prev_buf_size];

        if ((prev_buf_size + tx_buf_len) > tx_buffer_size) {
            tx_buffer_size = prev_buf_size + tx_buf_len;
            tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size);
            buf = &tx_send_buffer[prev_buf_size];
        }

        /* step 5 */
        if (dma_memory_read(&address_space_memory, tx_buf_addr, buf,
                            tx_buf_len, MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
                          __func__, tx_buf_addr);
            return;
        }
        length += tx_buf_len;
        prev_buf_size += tx_buf_len;

        /*
         * If the second-address-chained bit is not set, tdes3 holds a second
         * buffer rather than the next descriptor address.
         */
        if (!(tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK)) {
            tx_buf_addr = tx_desc.tdes3;
            gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr;
            tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1);
            buf = &tx_send_buffer[prev_buf_size];

            if ((prev_buf_size + tx_buf_len) > tx_buffer_size) {
                tx_buffer_size = prev_buf_size + tx_buf_len;
                tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size);
                buf = &tx_send_buffer[prev_buf_size];
            }

            if (dma_memory_read(&address_space_memory, tx_buf_addr, buf,
                                tx_buf_len, MEMTXATTRS_UNSPECIFIED)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: Failed to read packet @ 0x%x\n",
                              __func__, tx_buf_addr);
                return;
            }
            length += tx_buf_len;
            prev_buf_size += tx_buf_len;
        }
        if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) {
            net_checksum_calculate(tx_send_buffer, length, csum);
            qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length);
            trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length);
            buf = tx_send_buffer;
            length = 0;
        }

        /* step 6 */
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE);
        gmac_write_tx_desc(desc_addr, &tx_desc);
        if (tx_desc.tdes1 & TX_DESC_TDES1_TX_END_RING_MASK) {
            desc_addr = gmac->regs[R_NPCM_DMA_TX_BASE_ADDR];
        } else if (tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK) {
            desc_addr = tx_desc.tdes3;
        } else {
            desc_addr += sizeof(tx_desc);
        }
        gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = desc_addr;

        /* step 7 */
        if (tx_desc.tdes1 & TX_DESC_TDES1_INTERR_COMP_MASK) {
            gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TI;
            gmac_update_irq(gmac);
        }
    }
}
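For orientation, here is a sketch of how a guest driver could hand this loop a single-segment frame. It is illustrative only: GMAC_BASE, REG_XMT_POLL_DEMAND, and tdes1_set_buf1_size() are hypothetical placeholders (the real register offsets and the buffer-size field encoding are not shown in this diff); the flag masks are the ones tested by gmac_try_send_next_packet() above.

/* Illustrative guest-side sequence, not QEMU code.  GMAC_BASE,
 * REG_XMT_POLL_DEMAND and tdes1_set_buf1_size() are hypothetical. */
static void guest_queue_one_frame(volatile struct NPCMGMACTxDesc *desc,
                                  uint32_t buf_pa, uint32_t len)
{
    desc->tdes2 = buf_pa;                            /* buffer 1 address (step 4) */
    desc->tdes1 = TX_DESC_TDES1_FIRST_SEG_MASK       /* single segment: first...  */
                | TX_DESC_TDES1_LAST_SEG_MASK        /* ...and last               */
                | TX_DESC_TDES1_SEC_ADDR_CHND_MASK   /* tdes3 is not a 2nd buffer */
                | TX_DESC_TDES1_TX_END_RING_MASK     /* next descriptor: ring base */
                | TX_DESC_TDES1_INTERR_COMP_MASK     /* raise TI when sent (step 7) */
                | tdes1_set_buf1_size(len);          /* hypothetical size encoding */
    desc->tdes0 = TX_DESC_TDES0_OWN;                 /* hand the descriptor to DMA */

    /* Any write to the transmit poll demand register kicks the send loop. */
    *(volatile uint32_t *)(GMAC_BASE + REG_XMT_POLL_DEMAND) = 1;
}

With OWN set and the descriptor in guest memory, a write to the transmit poll demand register (or setting the TX start bit in DMA control, handled below) makes the model walk the ring until it finds a descriptor still owned by software.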
Identical in both revisions:

static void gmac_cleanup(NetClientState *nc)
{
    /* Nothing to do yet. */
}

static void gmac_set_link(NetClientState *nc)
{
    NPCMGMACState *gmac = qemu_get_nic_opaque(nc);
--- 138 unchanged lines hidden ---
        break;

    case A_NPCM_DMA_RCV_POLL_DEMAND:
        /* We don't actually care about the value */
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
        break;
Added in the new revision:

    case A_NPCM_DMA_XMT_POLL_DEMAND:
        /* We don't actually care about the value */
        gmac_try_send_next_packet(gmac);
        break;

    case A_NPCM_DMA_CONTROL:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        if (v & NPCM_DMA_CONTROL_START_STOP_TX) {
            gmac_try_send_next_packet(gmac);
        } else {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_TX_STOPPED_STATE);
        }
        if (v & NPCM_DMA_CONTROL_START_STOP_RX) {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
            qemu_flush_queued_packets(qemu_get_queue(gmac->nic));
        } else {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_RX_STOPPED_STATE);
        }
        break;
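A short, illustrative guest-side counterpart to the DMA control case above; GMAC_BASE and REG_DMA_CONTROL are hypothetical placeholders, while the two start/stop masks are the ones the model checks:

/* Illustrative guest-side sequence, not QEMU code. */
static void guest_start_dma(void)
{
    /* Setting the start/stop bits moves the TX and RX state machines out of
     * their STOPPED states; clearing a bit later stops that engine again. */
    *(volatile uint32_t *)(GMAC_BASE + REG_DMA_CONTROL) =
        NPCM_DMA_CONTROL_START_STOP_TX | NPCM_DMA_CONTROL_START_STOP_RX;
}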
Identical in both revisions:

    case A_NPCM_DMA_STATUS:
        /* Check that RO bits are not written to */
        if (NPCM_DMA_STATUS_RO_MASK(v)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Write of read-only bits of reg: offset: 0x%04"
                          HWADDR_PRIx ", value: 0x%04" PRIx64 "\n",
                          DEVICE(gmac)->canonical_path, offset, v);
        }
--- 116 unchanged lines hidden ---