1 /* 2 * Nuvoton NPCM7xx/8xx GMAC Module 3 * 4 * Copyright 2024 Google LLC 5 * Authors: 6 * Hao Wu <wuhaotsh@google.com> 7 * Nabih Estefan <nabihestefan@google.com> 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, but WITHOUT 15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 * for more details. 18 * 19 * Unsupported/unimplemented features: 20 * - MII is not implemented, MII_ADDR.BUSY and MII_DATA always return zero 21 * - Precision timestamp (PTP) is not implemented. 22 */ 23 24 #include "qemu/osdep.h" 25 26 #include "hw/registerfields.h" 27 #include "hw/net/mii.h" 28 #include "hw/net/npcm_gmac.h" 29 #include "migration/vmstate.h" 30 #include "net/checksum.h" 31 #include "net/eth.h" 32 #include "net/net.h" 33 #include "qemu/cutils.h" 34 #include "qemu/log.h" 35 #include "qemu/units.h" 36 #include "sysemu/dma.h" 37 #include "trace.h" 38 39 REG32(NPCM_DMA_BUS_MODE, 0x1000) 40 REG32(NPCM_DMA_XMT_POLL_DEMAND, 0x1004) 41 REG32(NPCM_DMA_RCV_POLL_DEMAND, 0x1008) 42 REG32(NPCM_DMA_RX_BASE_ADDR, 0x100c) 43 REG32(NPCM_DMA_TX_BASE_ADDR, 0x1010) 44 REG32(NPCM_DMA_STATUS, 0x1014) 45 REG32(NPCM_DMA_CONTROL, 0x1018) 46 REG32(NPCM_DMA_INTR_ENA, 0x101c) 47 REG32(NPCM_DMA_MISSED_FRAME_CTR, 0x1020) 48 REG32(NPCM_DMA_HOST_TX_DESC, 0x1048) 49 REG32(NPCM_DMA_HOST_RX_DESC, 0x104c) 50 REG32(NPCM_DMA_CUR_TX_BUF_ADDR, 0x1050) 51 REG32(NPCM_DMA_CUR_RX_BUF_ADDR, 0x1054) 52 REG32(NPCM_DMA_HW_FEATURE, 0x1058) 53 54 REG32(NPCM_GMAC_MAC_CONFIG, 0x0) 55 REG32(NPCM_GMAC_FRAME_FILTER, 0x4) 56 REG32(NPCM_GMAC_HASH_HIGH, 0x8) 57 REG32(NPCM_GMAC_HASH_LOW, 0xc) 58 REG32(NPCM_GMAC_MII_ADDR, 0x10) 59 REG32(NPCM_GMAC_MII_DATA, 0x14) 60 REG32(NPCM_GMAC_FLOW_CTRL, 0x18) 61 REG32(NPCM_GMAC_VLAN_FLAG, 0x1c) 62 REG32(NPCM_GMAC_VERSION, 0x20) 63 REG32(NPCM_GMAC_WAKEUP_FILTER, 0x28) 64 REG32(NPCM_GMAC_PMT, 0x2c) 65 REG32(NPCM_GMAC_LPI_CTRL, 0x30) 66 REG32(NPCM_GMAC_TIMER_CTRL, 0x34) 67 REG32(NPCM_GMAC_INT_STATUS, 0x38) 68 REG32(NPCM_GMAC_INT_MASK, 0x3c) 69 REG32(NPCM_GMAC_MAC0_ADDR_HI, 0x40) 70 REG32(NPCM_GMAC_MAC0_ADDR_LO, 0x44) 71 REG32(NPCM_GMAC_MAC1_ADDR_HI, 0x48) 72 REG32(NPCM_GMAC_MAC1_ADDR_LO, 0x4c) 73 REG32(NPCM_GMAC_MAC2_ADDR_HI, 0x50) 74 REG32(NPCM_GMAC_MAC2_ADDR_LO, 0x54) 75 REG32(NPCM_GMAC_MAC3_ADDR_HI, 0x58) 76 REG32(NPCM_GMAC_MAC3_ADDR_LO, 0x5c) 77 REG32(NPCM_GMAC_RGMII_STATUS, 0xd8) 78 REG32(NPCM_GMAC_WATCHDOG, 0xdc) 79 REG32(NPCM_GMAC_PTP_TCR, 0x700) 80 REG32(NPCM_GMAC_PTP_SSIR, 0x704) 81 REG32(NPCM_GMAC_PTP_STSR, 0x708) 82 REG32(NPCM_GMAC_PTP_STNSR, 0x70c) 83 REG32(NPCM_GMAC_PTP_STSUR, 0x710) 84 REG32(NPCM_GMAC_PTP_STNSUR, 0x714) 85 REG32(NPCM_GMAC_PTP_TAR, 0x718) 86 REG32(NPCM_GMAC_PTP_TTSR, 0x71c) 87 88 /* Register Fields */ 89 #define NPCM_GMAC_MII_ADDR_BUSY BIT(0) 90 #define NPCM_GMAC_MII_ADDR_WRITE BIT(1) 91 #define NPCM_GMAC_MII_ADDR_GR(rv) extract16((rv), 6, 5) 92 #define NPCM_GMAC_MII_ADDR_PA(rv) extract16((rv), 11, 5) 93 94 #define NPCM_GMAC_INT_MASK_LPIIM BIT(10) 95 #define NPCM_GMAC_INT_MASK_PMTM BIT(3) 96 #define NPCM_GMAC_INT_MASK_RGIM BIT(0) 97 98 #define NPCM_DMA_BUS_MODE_SWR BIT(0) 99 100 static const uint32_t npcm_gmac_cold_reset_values[NPCM_GMAC_NR_REGS] = { 101 /* Reduce version to 3.2 so that the kernel can enable interrupt. */ 102 [R_NPCM_GMAC_VERSION] = 0x00001032, 103 [R_NPCM_GMAC_TIMER_CTRL] = 0x03e80000, 104 [R_NPCM_GMAC_MAC0_ADDR_HI] = 0x8000ffff, 105 [R_NPCM_GMAC_MAC0_ADDR_LO] = 0xffffffff, 106 [R_NPCM_GMAC_MAC1_ADDR_HI] = 0x0000ffff, 107 [R_NPCM_GMAC_MAC1_ADDR_LO] = 0xffffffff, 108 [R_NPCM_GMAC_MAC2_ADDR_HI] = 0x0000ffff, 109 [R_NPCM_GMAC_MAC2_ADDR_LO] = 0xffffffff, 110 [R_NPCM_GMAC_MAC3_ADDR_HI] = 0x0000ffff, 111 [R_NPCM_GMAC_MAC3_ADDR_LO] = 0xffffffff, 112 [R_NPCM_GMAC_PTP_TCR] = 0x00002000, 113 [R_NPCM_DMA_BUS_MODE] = 0x00020101, 114 [R_NPCM_DMA_HW_FEATURE] = 0x100d4f37, 115 }; 116 117 static const uint16_t phy_reg_init[] = { 118 [MII_BMCR] = MII_BMCR_AUTOEN | MII_BMCR_FD | MII_BMCR_SPEED1000, 119 [MII_BMSR] = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD | 120 MII_BMSR_10T_HD | MII_BMSR_EXTSTAT | MII_BMSR_AUTONEG | 121 MII_BMSR_LINK_ST | MII_BMSR_EXTCAP, 122 [MII_PHYID1] = 0x0362, 123 [MII_PHYID2] = 0x5e6a, 124 [MII_ANAR] = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD | 125 MII_ANAR_10 | MII_ANAR_CSMACD, 126 [MII_ANLPAR] = MII_ANLPAR_ACK | MII_ANLPAR_PAUSE | 127 MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD | 128 MII_ANLPAR_10 | MII_ANLPAR_CSMACD, 129 [MII_ANER] = 0x64 | MII_ANER_NWAY, 130 [MII_ANNP] = 0x2001, 131 [MII_CTRL1000] = MII_CTRL1000_FULL, 132 [MII_STAT1000] = MII_STAT1000_FULL, 133 [MII_EXTSTAT] = 0x3000, /* 1000BASTE_T full-duplex capable */ 134 }; 135 136 static void npcm_gmac_soft_reset(NPCMGMACState *gmac) 137 { 138 memcpy(gmac->regs, npcm_gmac_cold_reset_values, 139 NPCM_GMAC_NR_REGS * sizeof(uint32_t)); 140 /* Clear reset bits */ 141 gmac->regs[R_NPCM_DMA_BUS_MODE] &= ~NPCM_DMA_BUS_MODE_SWR; 142 } 143 144 static void gmac_phy_set_link(NPCMGMACState *gmac, bool active) 145 { 146 /* Autonegotiation status mirrors link status. */ 147 if (active) { 148 gmac->phy_regs[0][MII_BMSR] |= (MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); 149 } else { 150 gmac->phy_regs[0][MII_BMSR] &= ~(MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); 151 } 152 } 153 154 static bool gmac_can_receive(NetClientState *nc) 155 { 156 NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc)); 157 158 /* If GMAC receive is disabled. */ 159 if (!(gmac->regs[R_NPCM_GMAC_MAC_CONFIG] & NPCM_GMAC_MAC_CONFIG_RX_EN)) { 160 return false; 161 } 162 163 /* If GMAC DMA RX is stopped. */ 164 if (!(gmac->regs[R_NPCM_DMA_CONTROL] & NPCM_DMA_CONTROL_START_STOP_RX)) { 165 return false; 166 } 167 return true; 168 } 169 170 /* 171 * Function that updates the GMAC IRQ 172 * It find the logical OR of the enabled bits for NIS (if enabled) 173 * It find the logical OR of the enabled bits for AIS (if enabled) 174 */ 175 static void gmac_update_irq(NPCMGMACState *gmac) 176 { 177 /* 178 * Check if the normal interrupts summary is enabled 179 * if so, add the bits for the summary that are enabled 180 */ 181 if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] & 182 (NPCM_DMA_INTR_ENAB_NIE_BITS)) { 183 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_NIS; 184 } 185 /* 186 * Check if the abnormal interrupts summary is enabled 187 * if so, add the bits for the summary that are enabled 188 */ 189 if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] & 190 (NPCM_DMA_INTR_ENAB_AIE_BITS)) { 191 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_AIS; 192 } 193 194 /* Get the logical OR of both normal and abnormal interrupts */ 195 int level = !!((gmac->regs[R_NPCM_DMA_STATUS] & 196 gmac->regs[R_NPCM_DMA_INTR_ENA] & 197 NPCM_DMA_STATUS_NIS) | 198 (gmac->regs[R_NPCM_DMA_STATUS] & 199 gmac->regs[R_NPCM_DMA_INTR_ENA] & 200 NPCM_DMA_STATUS_AIS)); 201 202 /* Set the IRQ */ 203 trace_npcm_gmac_update_irq(DEVICE(gmac)->canonical_path, 204 gmac->regs[R_NPCM_DMA_STATUS], 205 gmac->regs[R_NPCM_DMA_INTR_ENA], 206 level); 207 qemu_set_irq(gmac->irq, level); 208 } 209 210 static int gmac_read_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc) 211 { 212 if (dma_memory_read(&address_space_memory, addr, desc, 213 sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { 214 qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" 215 HWADDR_PRIx "\n", __func__, addr); 216 return -1; 217 } 218 desc->rdes0 = le32_to_cpu(desc->rdes0); 219 desc->rdes1 = le32_to_cpu(desc->rdes1); 220 desc->rdes2 = le32_to_cpu(desc->rdes2); 221 desc->rdes3 = le32_to_cpu(desc->rdes3); 222 return 0; 223 } 224 225 static int gmac_write_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc) 226 { 227 struct NPCMGMACRxDesc le_desc; 228 le_desc.rdes0 = cpu_to_le32(desc->rdes0); 229 le_desc.rdes1 = cpu_to_le32(desc->rdes1); 230 le_desc.rdes2 = cpu_to_le32(desc->rdes2); 231 le_desc.rdes3 = cpu_to_le32(desc->rdes3); 232 if (dma_memory_write(&address_space_memory, addr, &le_desc, 233 sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { 234 qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" 235 HWADDR_PRIx "\n", __func__, addr); 236 return -1; 237 } 238 return 0; 239 } 240 241 static int gmac_read_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc) 242 { 243 if (dma_memory_read(&address_space_memory, addr, desc, 244 sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { 245 qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" 246 HWADDR_PRIx "\n", __func__, addr); 247 return -1; 248 } 249 desc->tdes0 = le32_to_cpu(desc->tdes0); 250 desc->tdes1 = le32_to_cpu(desc->tdes1); 251 desc->tdes2 = le32_to_cpu(desc->tdes2); 252 desc->tdes3 = le32_to_cpu(desc->tdes3); 253 return 0; 254 } 255 256 static int gmac_write_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc) 257 { 258 struct NPCMGMACTxDesc le_desc; 259 le_desc.tdes0 = cpu_to_le32(desc->tdes0); 260 le_desc.tdes1 = cpu_to_le32(desc->tdes1); 261 le_desc.tdes2 = cpu_to_le32(desc->tdes2); 262 le_desc.tdes3 = cpu_to_le32(desc->tdes3); 263 if (dma_memory_write(&address_space_memory, addr, &le_desc, 264 sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { 265 qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" 266 HWADDR_PRIx "\n", __func__, addr); 267 return -1; 268 } 269 return 0; 270 } 271 272 static int gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len, 273 uint32_t *left_frame, 274 uint32_t rx_buf_addr, 275 bool *eof_transferred, 276 const uint8_t **frame_ptr, 277 uint16_t *transferred) 278 { 279 uint32_t to_transfer; 280 /* 281 * Check that buffer is bigger than the frame being transfered 282 * If bigger then transfer only whats left of frame 283 * Else, fill frame with all the content possible 284 */ 285 if (rx_buf_len >= *left_frame) { 286 to_transfer = *left_frame; 287 *eof_transferred = true; 288 } else { 289 to_transfer = rx_buf_len; 290 } 291 292 /* write frame part to memory */ 293 if (dma_memory_write(&address_space_memory, (uint64_t) rx_buf_addr, 294 *frame_ptr, to_transfer, MEMTXATTRS_UNSPECIFIED)) { 295 return -1; 296 } 297 298 /* update frame pointer and size of whats left of frame */ 299 *frame_ptr += to_transfer; 300 *left_frame -= to_transfer; 301 *transferred += to_transfer; 302 303 return 0; 304 } 305 306 static void gmac_dma_set_state(NPCMGMACState *gmac, int shift, uint32_t state) 307 { 308 gmac->regs[R_NPCM_DMA_STATUS] = deposit32(gmac->regs[R_NPCM_DMA_STATUS], 309 shift, 3, state); 310 } 311 312 static ssize_t gmac_receive(NetClientState *nc, const uint8_t *buf, size_t len) 313 { 314 /* 315 * Comments have steps that relate to the 316 * receiving process steps in pg 386 317 */ 318 NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc)); 319 uint32_t left_frame = len; 320 const uint8_t *frame_ptr = buf; 321 uint32_t desc_addr; 322 uint32_t rx_buf_len, rx_buf_addr; 323 struct NPCMGMACRxDesc rx_desc; 324 uint16_t transferred = 0; 325 bool eof_transferred = false; 326 327 trace_npcm_gmac_packet_receive(DEVICE(gmac)->canonical_path, len); 328 if (!gmac_can_receive(nc)) { 329 qemu_log_mask(LOG_GUEST_ERROR, "GMAC Currently is not able for Rx"); 330 return -1; 331 } 332 if (!gmac->regs[R_NPCM_DMA_HOST_RX_DESC]) { 333 gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = 334 NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]); 335 } 336 desc_addr = NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_HOST_RX_DESC]); 337 338 /* step 1 */ 339 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 340 NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE); 341 trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, desc_addr); 342 if (gmac_read_rx_desc(desc_addr, &rx_desc)) { 343 qemu_log_mask(LOG_GUEST_ERROR, "RX Descriptor @ 0x%x cant be read\n", 344 desc_addr); 345 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 346 NPCM_DMA_STATUS_RX_SUSPENDED_STATE); 347 return -1; 348 } 349 350 /* step 2 */ 351 if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) { 352 qemu_log_mask(LOG_GUEST_ERROR, 353 "RX Descriptor @ 0x%x is owned by software\n", 354 desc_addr); 355 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU; 356 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI; 357 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 358 NPCM_DMA_STATUS_RX_SUSPENDED_STATE); 359 gmac_update_irq(gmac); 360 return len; 361 } 362 /* step 3 */ 363 /* 364 * TODO -- 365 * Implement all frame filtering and processing (with its own interrupts) 366 */ 367 trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc, 368 rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2, 369 rx_desc.rdes3); 370 /* Clear rdes0 for the incoming descriptor and set FS in first descriptor.*/ 371 rx_desc.rdes0 = RX_DESC_RDES0_FIRST_DESC_MASK; 372 373 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 374 NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE); 375 376 /* Pad the frame with FCS as the kernel driver will strip it away. */ 377 left_frame += ETH_FCS_LEN; 378 379 /* repeat while we still have frame to transfer to memory */ 380 while (!eof_transferred) { 381 /* Return descriptor no matter what happens */ 382 rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN; 383 /* Set the frame to be an IPv4/IPv6 frame. */ 384 rx_desc.rdes0 |= RX_DESC_RDES0_FRM_TYPE_MASK; 385 386 /* step 4 */ 387 rx_buf_len = RX_DESC_RDES1_BFFR1_SZ_MASK(rx_desc.rdes1); 388 rx_buf_addr = rx_desc.rdes2; 389 gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr; 390 gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, rx_buf_addr, 391 &eof_transferred, &frame_ptr, 392 &transferred); 393 394 trace_npcm_gmac_packet_receiving_buffer(DEVICE(gmac)->canonical_path, 395 rx_buf_len, rx_buf_addr); 396 /* if we still have frame left and the second buffer is not chained */ 397 if (!(rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) && \ 398 !eof_transferred) { 399 /* repeat process from above on buffer 2 */ 400 rx_buf_len = RX_DESC_RDES1_BFFR2_SZ_MASK(rx_desc.rdes1); 401 rx_buf_addr = rx_desc.rdes3; 402 gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr; 403 gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, 404 rx_buf_addr, &eof_transferred, 405 &frame_ptr, &transferred); 406 trace_npcm_gmac_packet_receiving_buffer( \ 407 DEVICE(gmac)->canonical_path, 408 rx_buf_len, rx_buf_addr); 409 } 410 /* update address for descriptor */ 411 gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = rx_buf_addr; 412 /* Return descriptor */ 413 rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN; 414 /* Update frame length transferred */ 415 rx_desc.rdes0 |= ((uint32_t)transferred) 416 << RX_DESC_RDES0_FRAME_LEN_SHIFT; 417 trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc, 418 rx_desc.rdes0, rx_desc.rdes1, 419 rx_desc.rdes2, rx_desc.rdes3); 420 421 /* step 5 */ 422 gmac_write_rx_desc(desc_addr, &rx_desc); 423 trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, 424 &rx_desc, rx_desc.rdes0, 425 rx_desc.rdes1, rx_desc.rdes2, 426 rx_desc.rdes3); 427 /* read new descriptor into rx_desc if needed*/ 428 if (!eof_transferred) { 429 /* Get next descriptor address (chained or sequential) */ 430 if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) { 431 desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]; 432 } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) { 433 desc_addr = rx_desc.rdes3; 434 } else { 435 desc_addr += sizeof(rx_desc); 436 } 437 trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, 438 desc_addr); 439 if (gmac_read_rx_desc(desc_addr, &rx_desc)) { 440 qemu_log_mask(LOG_GUEST_ERROR, 441 "RX Descriptor @ 0x%x cant be read\n", 442 desc_addr); 443 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU; 444 gmac_update_irq(gmac); 445 return len; 446 } 447 448 /* step 6 */ 449 if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) { 450 if (!(gmac->regs[R_NPCM_DMA_CONTROL] & \ 451 NPCM_DMA_CONTROL_FLUSH_MASK)) { 452 rx_desc.rdes0 |= RX_DESC_RDES0_DESC_ERR_MASK; 453 } 454 eof_transferred = true; 455 } 456 /* Clear rdes0 for the incoming descriptor */ 457 rx_desc.rdes0 = 0; 458 } 459 } 460 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 461 NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE); 462 463 rx_desc.rdes0 |= RX_DESC_RDES0_LAST_DESC_MASK; 464 if (!(rx_desc.rdes1 & RX_DESC_RDES1_DIS_INTR_COMP_MASK)) { 465 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI; 466 gmac_update_irq(gmac); 467 } 468 trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc, 469 rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2, 470 rx_desc.rdes3); 471 472 /* step 8 */ 473 gmac->regs[R_NPCM_DMA_CONTROL] |= NPCM_DMA_CONTROL_FLUSH_MASK; 474 475 /* step 9 */ 476 trace_npcm_gmac_packet_received(DEVICE(gmac)->canonical_path, left_frame); 477 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 478 NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); 479 gmac_write_rx_desc(desc_addr, &rx_desc); 480 481 /* Get next descriptor address (chained or sequential) */ 482 if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) { 483 desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]; 484 } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) { 485 desc_addr = rx_desc.rdes3; 486 } else { 487 desc_addr += sizeof(rx_desc); 488 } 489 gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = desc_addr; 490 return len; 491 } 492 493 static int gmac_tx_get_csum(uint32_t tdes1) 494 { 495 uint32_t mask = TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(tdes1); 496 int csum = 0; 497 498 if (likely(mask > 0)) { 499 csum |= CSUM_IP; 500 } 501 if (likely(mask > 1)) { 502 csum |= CSUM_TCP | CSUM_UDP; 503 } 504 505 return csum; 506 } 507 508 static void gmac_try_send_next_packet(NPCMGMACState *gmac) 509 { 510 /* 511 * Comments about steps refer to steps for 512 * transmitting in page 384 of datasheet 513 */ 514 uint16_t tx_buffer_size = 2048; 515 g_autofree uint8_t *tx_send_buffer = g_malloc(tx_buffer_size); 516 uint32_t desc_addr; 517 struct NPCMGMACTxDesc tx_desc; 518 uint32_t tx_buf_addr, tx_buf_len; 519 uint16_t length = 0; 520 uint8_t *buf = tx_send_buffer; 521 uint32_t prev_buf_size = 0; 522 int csum = 0; 523 524 /* steps 1&2 */ 525 if (!gmac->regs[R_NPCM_DMA_HOST_TX_DESC]) { 526 gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = 527 NPCM_DMA_HOST_TX_DESC_MASK(gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]); 528 } 529 desc_addr = gmac->regs[R_NPCM_DMA_HOST_TX_DESC]; 530 531 while (true) { 532 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, 533 NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE); 534 if (gmac_read_tx_desc(desc_addr, &tx_desc)) { 535 qemu_log_mask(LOG_GUEST_ERROR, 536 "TX Descriptor @ 0x%x can't be read\n", 537 desc_addr); 538 return; 539 } 540 /* step 3 */ 541 542 trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, 543 desc_addr); 544 trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &tx_desc, 545 tx_desc.tdes0, tx_desc.tdes1, tx_desc.tdes2, tx_desc.tdes3); 546 547 /* 1 = DMA Owned, 0 = Software Owned */ 548 if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) { 549 qemu_log_mask(LOG_GUEST_ERROR, 550 "TX Descriptor @ 0x%x is owned by software\n", 551 desc_addr); 552 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU; 553 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, 554 NPCM_DMA_STATUS_TX_SUSPENDED_STATE); 555 gmac_update_irq(gmac); 556 return; 557 } 558 559 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, 560 NPCM_DMA_STATUS_TX_RUNNING_READ_STATE); 561 /* Give the descriptor back regardless of what happens. */ 562 tx_desc.tdes0 &= ~TX_DESC_TDES0_OWN; 563 564 if (tx_desc.tdes1 & TX_DESC_TDES1_FIRST_SEG_MASK) { 565 csum = gmac_tx_get_csum(tx_desc.tdes1); 566 } 567 568 /* step 4 */ 569 tx_buf_addr = tx_desc.tdes2; 570 gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; 571 tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1); 572 buf = &tx_send_buffer[prev_buf_size]; 573 574 if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { 575 tx_buffer_size = prev_buf_size + tx_buf_len; 576 tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); 577 buf = &tx_send_buffer[prev_buf_size]; 578 } 579 580 /* step 5 */ 581 if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, 582 tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { 583 qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", 584 __func__, tx_buf_addr); 585 return; 586 } 587 length += tx_buf_len; 588 prev_buf_size += tx_buf_len; 589 590 /* If not chained we'll have a second buffer. */ 591 if (!(tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK)) { 592 tx_buf_addr = tx_desc.tdes3; 593 gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; 594 tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1); 595 buf = &tx_send_buffer[prev_buf_size]; 596 597 if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { 598 tx_buffer_size = prev_buf_size + tx_buf_len; 599 tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); 600 buf = &tx_send_buffer[prev_buf_size]; 601 } 602 603 if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, 604 tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { 605 qemu_log_mask(LOG_GUEST_ERROR, 606 "%s: Failed to read packet @ 0x%x\n", 607 __func__, tx_buf_addr); 608 return; 609 } 610 length += tx_buf_len; 611 prev_buf_size += tx_buf_len; 612 } 613 if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) { 614 net_checksum_calculate(tx_send_buffer, length, csum); 615 qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length); 616 trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length); 617 buf = tx_send_buffer; 618 length = 0; 619 } 620 621 /* step 6 */ 622 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, 623 NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE); 624 gmac_write_tx_desc(desc_addr, &tx_desc); 625 if (tx_desc.tdes1 & TX_DESC_TDES1_TX_END_RING_MASK) { 626 desc_addr = gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]; 627 } else if (tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK) { 628 desc_addr = tx_desc.tdes3; 629 } else { 630 desc_addr += sizeof(tx_desc); 631 } 632 gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = desc_addr; 633 634 /* step 7 */ 635 if (tx_desc.tdes1 & TX_DESC_TDES1_INTERR_COMP_MASK) { 636 gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TI; 637 gmac_update_irq(gmac); 638 } 639 } 640 } 641 642 static void gmac_cleanup(NetClientState *nc) 643 { 644 /* Nothing to do yet. */ 645 } 646 647 static void gmac_set_link(NetClientState *nc) 648 { 649 NPCMGMACState *gmac = qemu_get_nic_opaque(nc); 650 651 trace_npcm_gmac_set_link(!nc->link_down); 652 gmac_phy_set_link(gmac, !nc->link_down); 653 } 654 655 static void npcm_gmac_mdio_access(NPCMGMACState *gmac, uint16_t v) 656 { 657 bool busy = v & NPCM_GMAC_MII_ADDR_BUSY; 658 uint8_t is_write; 659 uint8_t pa, gr; 660 uint16_t data; 661 662 if (busy) { 663 is_write = v & NPCM_GMAC_MII_ADDR_WRITE; 664 pa = NPCM_GMAC_MII_ADDR_PA(v); 665 gr = NPCM_GMAC_MII_ADDR_GR(v); 666 /* Both pa and gr are 5 bits, so they are less than 32. */ 667 g_assert(pa < NPCM_GMAC_MAX_PHYS); 668 g_assert(gr < NPCM_GMAC_MAX_PHY_REGS); 669 670 671 if (v & NPCM_GMAC_MII_ADDR_WRITE) { 672 data = gmac->regs[R_NPCM_GMAC_MII_DATA]; 673 /* Clear reset bit for BMCR register */ 674 switch (gr) { 675 case MII_BMCR: 676 data &= ~MII_BMCR_RESET; 677 /* Autonegotiation is a W1C bit*/ 678 if (data & MII_BMCR_ANRESTART) { 679 /* Tells autonegotiation to not restart again */ 680 data &= ~MII_BMCR_ANRESTART; 681 } 682 if ((data & MII_BMCR_AUTOEN) && 683 !(gmac->phy_regs[pa][MII_BMSR] & MII_BMSR_AN_COMP)) { 684 /* sets autonegotiation as complete */ 685 gmac->phy_regs[pa][MII_BMSR] |= MII_BMSR_AN_COMP; 686 /* Resolve AN automatically->need to set this */ 687 gmac->phy_regs[0][MII_ANLPAR] = 0x0000; 688 } 689 } 690 gmac->phy_regs[pa][gr] = data; 691 } else { 692 data = gmac->phy_regs[pa][gr]; 693 gmac->regs[R_NPCM_GMAC_MII_DATA] = data; 694 } 695 trace_npcm_gmac_mdio_access(DEVICE(gmac)->canonical_path, is_write, pa, 696 gr, data); 697 } 698 gmac->regs[R_NPCM_GMAC_MII_ADDR] = v & ~NPCM_GMAC_MII_ADDR_BUSY; 699 } 700 701 static uint64_t npcm_gmac_read(void *opaque, hwaddr offset, unsigned size) 702 { 703 NPCMGMACState *gmac = opaque; 704 uint32_t v = 0; 705 706 switch (offset) { 707 /* Write only registers */ 708 case A_NPCM_DMA_XMT_POLL_DEMAND: 709 case A_NPCM_DMA_RCV_POLL_DEMAND: 710 qemu_log_mask(LOG_GUEST_ERROR, 711 "%s: Read of write-only reg: offset: 0x%04" HWADDR_PRIx 712 "\n", DEVICE(gmac)->canonical_path, offset); 713 break; 714 715 default: 716 v = gmac->regs[offset / sizeof(uint32_t)]; 717 } 718 719 trace_npcm_gmac_reg_read(DEVICE(gmac)->canonical_path, offset, v); 720 return v; 721 } 722 723 static void npcm_gmac_write(void *opaque, hwaddr offset, 724 uint64_t v, unsigned size) 725 { 726 NPCMGMACState *gmac = opaque; 727 728 trace_npcm_gmac_reg_write(DEVICE(gmac)->canonical_path, offset, v); 729 730 switch (offset) { 731 /* Read only registers */ 732 case A_NPCM_GMAC_VERSION: 733 case A_NPCM_GMAC_INT_STATUS: 734 case A_NPCM_GMAC_RGMII_STATUS: 735 case A_NPCM_GMAC_PTP_STSR: 736 case A_NPCM_GMAC_PTP_STNSR: 737 case A_NPCM_DMA_MISSED_FRAME_CTR: 738 case A_NPCM_DMA_HOST_TX_DESC: 739 case A_NPCM_DMA_HOST_RX_DESC: 740 case A_NPCM_DMA_CUR_TX_BUF_ADDR: 741 case A_NPCM_DMA_CUR_RX_BUF_ADDR: 742 case A_NPCM_DMA_HW_FEATURE: 743 qemu_log_mask(LOG_GUEST_ERROR, 744 "%s: Write of read-only reg: offset: 0x%04" HWADDR_PRIx 745 ", value: 0x%04" PRIx64 "\n", 746 DEVICE(gmac)->canonical_path, offset, v); 747 break; 748 749 case A_NPCM_GMAC_MAC_CONFIG: 750 gmac->regs[offset / sizeof(uint32_t)] = v; 751 break; 752 753 case A_NPCM_GMAC_MII_ADDR: 754 npcm_gmac_mdio_access(gmac, v); 755 break; 756 757 case A_NPCM_GMAC_MAC0_ADDR_HI: 758 gmac->regs[offset / sizeof(uint32_t)] = v; 759 gmac->conf.macaddr.a[0] = v >> 8; 760 gmac->conf.macaddr.a[1] = v >> 0; 761 break; 762 763 case A_NPCM_GMAC_MAC0_ADDR_LO: 764 gmac->regs[offset / sizeof(uint32_t)] = v; 765 gmac->conf.macaddr.a[2] = v >> 24; 766 gmac->conf.macaddr.a[3] = v >> 16; 767 gmac->conf.macaddr.a[4] = v >> 8; 768 gmac->conf.macaddr.a[5] = v >> 0; 769 break; 770 771 case A_NPCM_GMAC_MAC1_ADDR_HI: 772 case A_NPCM_GMAC_MAC1_ADDR_LO: 773 case A_NPCM_GMAC_MAC2_ADDR_HI: 774 case A_NPCM_GMAC_MAC2_ADDR_LO: 775 case A_NPCM_GMAC_MAC3_ADDR_HI: 776 case A_NPCM_GMAC_MAC3_ADDR_LO: 777 gmac->regs[offset / sizeof(uint32_t)] = v; 778 qemu_log_mask(LOG_UNIMP, 779 "%s: Only MAC Address 0 is supported. This request " 780 "is ignored.\n", DEVICE(gmac)->canonical_path); 781 break; 782 783 case A_NPCM_DMA_BUS_MODE: 784 gmac->regs[offset / sizeof(uint32_t)] = v; 785 if (v & NPCM_DMA_BUS_MODE_SWR) { 786 npcm_gmac_soft_reset(gmac); 787 } 788 break; 789 790 case A_NPCM_DMA_RCV_POLL_DEMAND: 791 /* We dont actually care about the value */ 792 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 793 NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); 794 break; 795 796 case A_NPCM_DMA_XMT_POLL_DEMAND: 797 /* We dont actually care about the value */ 798 gmac_try_send_next_packet(gmac); 799 break; 800 801 case A_NPCM_DMA_CONTROL: 802 gmac->regs[offset / sizeof(uint32_t)] = v; 803 if (v & NPCM_DMA_CONTROL_START_STOP_TX) { 804 gmac_try_send_next_packet(gmac); 805 } else { 806 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, 807 NPCM_DMA_STATUS_TX_STOPPED_STATE); 808 } 809 if (v & NPCM_DMA_CONTROL_START_STOP_RX) { 810 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 811 NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); 812 qemu_flush_queued_packets(qemu_get_queue(gmac->nic)); 813 } else { 814 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 815 NPCM_DMA_STATUS_RX_STOPPED_STATE); 816 } 817 break; 818 819 case A_NPCM_DMA_STATUS: 820 /* Check that RO bits are not written to */ 821 if (NPCM_DMA_STATUS_RO_MASK(v)) { 822 qemu_log_mask(LOG_GUEST_ERROR, 823 "%s: Write of read-only bits of reg: offset: 0x%04" 824 HWADDR_PRIx ", value: 0x%04" PRIx64 "\n", 825 DEVICE(gmac)->canonical_path, offset, v); 826 } 827 /* for W1C bits, implement W1C */ 828 gmac->regs[offset / sizeof(uint32_t)] &= ~NPCM_DMA_STATUS_W1C_MASK(v); 829 if (v & NPCM_DMA_STATUS_RU) { 830 /* Clearing RU bit indicates descriptor is owned by DMA again. */ 831 gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, 832 NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); 833 qemu_flush_queued_packets(qemu_get_queue(gmac->nic)); 834 } 835 break; 836 837 default: 838 gmac->regs[offset / sizeof(uint32_t)] = v; 839 break; 840 } 841 842 gmac_update_irq(gmac); 843 } 844 845 static void npcm_gmac_reset(DeviceState *dev) 846 { 847 NPCMGMACState *gmac = NPCM_GMAC(dev); 848 849 npcm_gmac_soft_reset(gmac); 850 memcpy(gmac->phy_regs[0], phy_reg_init, sizeof(phy_reg_init)); 851 852 trace_npcm_gmac_reset(DEVICE(gmac)->canonical_path, 853 gmac->phy_regs[0][MII_BMSR]); 854 } 855 856 static NetClientInfo net_npcm_gmac_info = { 857 .type = NET_CLIENT_DRIVER_NIC, 858 .size = sizeof(NICState), 859 .can_receive = gmac_can_receive, 860 .receive = gmac_receive, 861 .cleanup = gmac_cleanup, 862 .link_status_changed = gmac_set_link, 863 }; 864 865 static const struct MemoryRegionOps npcm_gmac_ops = { 866 .read = npcm_gmac_read, 867 .write = npcm_gmac_write, 868 .endianness = DEVICE_LITTLE_ENDIAN, 869 .valid = { 870 .min_access_size = 4, 871 .max_access_size = 4, 872 .unaligned = false, 873 }, 874 }; 875 876 static void npcm_gmac_realize(DeviceState *dev, Error **errp) 877 { 878 NPCMGMACState *gmac = NPCM_GMAC(dev); 879 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 880 881 memory_region_init_io(&gmac->iomem, OBJECT(gmac), &npcm_gmac_ops, gmac, 882 TYPE_NPCM_GMAC, 8 * KiB); 883 sysbus_init_mmio(sbd, &gmac->iomem); 884 sysbus_init_irq(sbd, &gmac->irq); 885 886 qemu_macaddr_default_if_unset(&gmac->conf.macaddr); 887 888 gmac->nic = qemu_new_nic(&net_npcm_gmac_info, &gmac->conf, TYPE_NPCM_GMAC, 889 dev->id, &dev->mem_reentrancy_guard, gmac); 890 qemu_format_nic_info_str(qemu_get_queue(gmac->nic), gmac->conf.macaddr.a); 891 gmac->regs[R_NPCM_GMAC_MAC0_ADDR_HI] = (gmac->conf.macaddr.a[0] << 8) + \ 892 gmac->conf.macaddr.a[1]; 893 gmac->regs[R_NPCM_GMAC_MAC0_ADDR_LO] = (gmac->conf.macaddr.a[2] << 24) + \ 894 (gmac->conf.macaddr.a[3] << 16) + \ 895 (gmac->conf.macaddr.a[4] << 8) + \ 896 gmac->conf.macaddr.a[5]; 897 } 898 899 static void npcm_gmac_unrealize(DeviceState *dev) 900 { 901 NPCMGMACState *gmac = NPCM_GMAC(dev); 902 903 qemu_del_nic(gmac->nic); 904 } 905 906 static const VMStateDescription vmstate_npcm_gmac = { 907 .name = TYPE_NPCM_GMAC, 908 .version_id = 0, 909 .minimum_version_id = 0, 910 .fields = (VMStateField[]) { 911 VMSTATE_UINT32_ARRAY(regs, NPCMGMACState, NPCM_GMAC_NR_REGS), 912 VMSTATE_END_OF_LIST(), 913 }, 914 }; 915 916 static Property npcm_gmac_properties[] = { 917 DEFINE_NIC_PROPERTIES(NPCMGMACState, conf), 918 DEFINE_PROP_END_OF_LIST(), 919 }; 920 921 static void npcm_gmac_class_init(ObjectClass *klass, void *data) 922 { 923 DeviceClass *dc = DEVICE_CLASS(klass); 924 925 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); 926 dc->desc = "NPCM GMAC Controller"; 927 dc->realize = npcm_gmac_realize; 928 dc->unrealize = npcm_gmac_unrealize; 929 device_class_set_legacy_reset(dc, npcm_gmac_reset); 930 dc->vmsd = &vmstate_npcm_gmac; 931 device_class_set_props(dc, npcm_gmac_properties); 932 } 933 934 static const TypeInfo npcm_gmac_types[] = { 935 { 936 .name = TYPE_NPCM_GMAC, 937 .parent = TYPE_SYS_BUS_DEVICE, 938 .instance_size = sizeof(NPCMGMACState), 939 .class_init = npcm_gmac_class_init, 940 }, 941 }; 942 DEFINE_TYPES(npcm_gmac_types) 943