/*
 * QEMU VMWARE VMXNET3 paravirtual NIC
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 *
 */

#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
#include "net/tap.h"
#include "net/checksum.h"
#include "sysemu/sysemu.h"
#include "qemu-common.h"
#include "qemu/bswap.h"
#include "hw/pci/msix.h"
#include "hw/pci/msi.h"

#include "vmxnet3.h"
#include "vmxnet_debug.h"
#include "vmware_utils.h"
#include "vmxnet_tx_pkt.h"
#include "vmxnet_rx_pkt.h"

#define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1
#define VMXNET3_MSIX_BAR_SIZE 0x2000

#define VMXNET3_BAR0_IDX      (0)
#define VMXNET3_BAR1_IDX      (1)
#define VMXNET3_MSIX_BAR_IDX  (2)

#define VMXNET3_OFF_MSIX_TABLE (0x000)
#define VMXNET3_OFF_MSIX_PBA   (0x800)

/* Link speed in Mbps should be shifted by 16 */
#define VMXNET3_LINK_SPEED      (1000 << 16)

/* Link status: 1 - up, 0 - down. */
#define VMXNET3_LINK_STATUS_UP  0x1

/* Least significant bit should be set for revision and version */
#define VMXNET3_DEVICE_VERSION    0x1
#define VMXNET3_DEVICE_REVISION   0x1

/* Macros for ring descriptor access */
#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
    (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))

#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))

/* Macros for guest driver shared area access */
#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
    (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
    (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
    (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))

#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
    (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
    (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
    (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
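/*
 * For illustration (not part of the device model): a field access such as
 *     VMXNET3_READ_TX_QUEUE_DESCR32(dpa, conf.txRingSize)
 * expands to
 *     vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc,
 *                                   conf.txRingSize))
 * i.e. a 32-bit load from guest physical memory at the queue descriptor
 * base address plus the field offset.
 */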
#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))

#define TYPE_VMXNET3 "vmxnet3"
#define VMXNET3(obj) OBJECT_CHECK(VMXNET3State, (obj), TYPE_VMXNET3)

/* Cyclic ring abstraction */
typedef struct {
    hwaddr pa;
    size_t size;
    size_t cell_size;
    size_t next;
    uint8_t gen;
} Vmxnet3Ring;

static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
                                     hwaddr pa,
                                     size_t size,
                                     size_t cell_size,
                                     bool zero_region)
{
    ring->pa = pa;
    ring->size = size;
    ring->cell_size = cell_size;
    ring->gen = VMXNET3_INIT_GEN;
    ring->next = 0;

    if (zero_region) {
        vmw_shmem_set(pa, 0, size * cell_size);
    }
}

#define VMXNET3_RING_DUMP(macro, ring_name, ridx, r)                        \
    macro("%s#%d: base %" PRIx64 " size %zu cell_size %zu gen %d next %zu", \
          (ring_name), (ridx),                                              \
          (r)->pa, (r)->size, (r)->cell_size, (r)->gen, (r)->next)

static inline void vmxnet3_ring_inc(Vmxnet3Ring *ring)
{
    if (++ring->next >= ring->size) {
        ring->next = 0;
        ring->gen ^= 1;
    }
}

static inline void vmxnet3_ring_dec(Vmxnet3Ring *ring)
{
    if (ring->next-- == 0) {
        ring->next = ring->size - 1;
        ring->gen ^= 1;
    }
}

static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
{
    return ring->pa + ring->next * ring->cell_size;
}

static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
{
    vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}

static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
{
    vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}

static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
{
    return ring->next;
}

static inline uint8_t vmxnet3_ring_curr_gen(Vmxnet3Ring *ring)
{
    return ring->gen;
}
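/*
 * Illustrative note (not part of the device model): ring ownership is
 * tracked with a generation bit rather than head/tail pointers.  A
 * descriptor belongs to the device only while its gen field matches the
 * ring's current generation; each time 'next' wraps from size - 1 back
 * to 0, the generation flips, e.g. for a 4-entry ring:
 *
 *     next: 0 1 2 3 0 1 2 3 ...
 *     gen:  1 1 1 1 0 0 0 0 ...
 *
 * so a stale descriptor left over from the previous lap is never
 * mistaken for a fresh one.
 */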
/* Debug trace-related functions */
static inline void
vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr)
{
    VMW_PKPRN("TX DESCR: "
              "addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
              "dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, "
              "eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d",
              le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd,
              descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om,
              descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci);
}

static inline void
vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr)
{
    VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, "
              "csum_start: %d, csum_offset: %d",
              vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size,
              vhdr->csum_start, vhdr->csum_offset);
}

static inline void
vmxnet3_dump_rx_descr(struct Vmxnet3_RxDesc *descr)
{
    VMW_PKPRN("RX DESCR: addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
              "dtype: %d, ext1: %d, btype: %d",
              le64_to_cpu(descr->addr), descr->len, descr->gen,
              descr->rsvd, descr->dtype, descr->ext1, descr->btype);
}

/* Device state and helper functions */
#define VMXNET3_RX_RINGS_PER_QUEUE (2)

typedef struct {
    Vmxnet3Ring tx_ring;
    Vmxnet3Ring comp_ring;

    uint8_t intr_idx;
    hwaddr tx_stats_pa;
    struct UPT1_TxStats txq_stats;
} Vmxnet3TxqDescr;

typedef struct {
    Vmxnet3Ring rx_ring[VMXNET3_RX_RINGS_PER_QUEUE];
    Vmxnet3Ring comp_ring;
    uint8_t intr_idx;
    hwaddr rx_stats_pa;
    struct UPT1_RxStats rxq_stats;
} Vmxnet3RxqDescr;

typedef struct {
    bool is_masked;
    bool is_pending;
    bool is_asserted;
} Vmxnet3IntState;

typedef struct {
    PCIDevice parent_obj;
    NICState *nic;
    NICConf conf;
    MemoryRegion bar0;
    MemoryRegion bar1;
    MemoryRegion msix_bar;

    Vmxnet3RxqDescr rxq_descr[VMXNET3_DEVICE_MAX_RX_QUEUES];
    Vmxnet3TxqDescr txq_descr[VMXNET3_DEVICE_MAX_TX_QUEUES];

    /* Whether MSI-X support was installed successfully */
    bool msix_used;
    /* Whether MSI support was installed successfully */
    bool msi_used;
    hwaddr drv_shmem;
    hwaddr temp_shared_guest_driver_memory;

    uint8_t txq_num;

    /*
     * Whether the RX packet being indicated has to be split into
     * head and body chunks from different RX rings
     */
    bool rx_packets_compound;

    bool rx_vlan_stripping;
    bool lro_supported;

    uint8_t rxq_num;

    /* Network MTU */
    uint32_t mtu;

    /* Maximum number of fragments for indicated TX packets */
    uint32_t max_tx_frags;

    /* Maximum number of fragments for indicated RX packets */
    uint16_t max_rx_frags;

    /* Index for events interrupt */
    uint8_t event_int_idx;

    /* Whether automatic interrupt masking is enabled */
    bool auto_int_masking;

    bool peer_has_vhdr;

    /* TX packets to QEMU interface */
    struct VmxnetTxPkt *tx_pkt;
    uint32_t offload_mode;
    uint32_t cso_or_gso_size;
    uint16_t tci;
    bool needs_vlan;

    struct VmxnetRxPkt *rx_pkt;

    bool tx_sop;
    bool skip_current_tx_pkt;

    uint32_t device_active;
    uint32_t last_command;

    uint32_t link_status_and_speed;

    Vmxnet3IntState interrupt_states[VMXNET3_MAX_INTRS];

    uint32_t temp_mac;   /* To store the low part first */

    MACAddr perm_mac;
    uint32_t vlan_table[VMXNET3_VFT_SIZE];
    uint32_t rx_mode;
    MACAddr *mcast_list;
    uint32_t mcast_list_len;
    uint32_t mcast_list_buff_size; /* needed for live migration. */
} VMXNET3State;
/* Interrupt management */

/*
 * This function returns whether the interrupt line is asserted after the
 * call.  The result depends on the type of interrupt used: an INTx line
 * stays asserted until explicitly deasserted, while an MSI(X) interrupt
 * line is deasserted automatically due to the notification semantics of
 * message interrupts.
 */
static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msix_used && msix_enabled(d)) {
        VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx);
        msix_notify(d, int_idx);
        return false;
    }
    if (s->msi_used && msi_enabled(d)) {
        VMW_IRPRN("Sending MSI notification for vector %u", int_idx);
        msi_notify(d, int_idx);
        return false;
    }

    VMW_IRPRN("Asserting line for interrupt %u", int_idx);
    pci_irq_assert(d);
    return true;
}

static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx)
{
    PCIDevice *d = PCI_DEVICE(s);

    /*
     * This function should never be called for MSI(X) interrupts
     * because deassertion is never required for message interrupts
     */
    assert(!s->msix_used || !msix_enabled(d));
    assert(!s->msi_used || !msi_enabled(d));

    VMW_IRPRN("Deasserting line for interrupt %u", lidx);
    pci_irq_deassert(d);
}

static void vmxnet3_update_interrupt_line_state(VMXNET3State *s, int lidx)
{
    if (!s->interrupt_states[lidx].is_pending &&
        s->interrupt_states[lidx].is_asserted) {
        VMW_IRPRN("New interrupt line state for index %d is DOWN", lidx);
        _vmxnet3_deassert_interrupt_line(s, lidx);
        s->interrupt_states[lidx].is_asserted = false;
        return;
    }

    if (s->interrupt_states[lidx].is_pending &&
        !s->interrupt_states[lidx].is_masked &&
        !s->interrupt_states[lidx].is_asserted) {
        VMW_IRPRN("New interrupt line state for index %d is UP", lidx);
        s->interrupt_states[lidx].is_asserted =
            _vmxnet3_assert_interrupt_line(s, lidx);
        s->interrupt_states[lidx].is_pending = false;
        return;
    }
}

static void vmxnet3_trigger_interrupt(VMXNET3State *s, int lidx)
{
    PCIDevice *d = PCI_DEVICE(s);
    s->interrupt_states[lidx].is_pending = true;
    vmxnet3_update_interrupt_line_state(s, lidx);

    if (s->msix_used && msix_enabled(d) && s->auto_int_masking) {
        goto do_automask;
    }

    if (s->msi_used && msi_enabled(d) && s->auto_int_masking) {
        goto do_automask;
    }

    return;

do_automask:
    s->interrupt_states[lidx].is_masked = true;
    vmxnet3_update_interrupt_line_state(s, lidx);
}

static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx)
{
    return s->interrupt_states[lidx].is_asserted;
}

static void vmxnet3_clear_interrupt(VMXNET3State *s, int int_idx)
{
    s->interrupt_states[int_idx].is_pending = false;
    if (s->auto_int_masking) {
        s->interrupt_states[int_idx].is_masked = true;
    }
    vmxnet3_update_interrupt_line_state(s, int_idx);
}

static void
vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
{
    s->interrupt_states[lidx].is_masked = is_masked;
    vmxnet3_update_interrupt_line_state(s, lidx);
}
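/*
 * Illustrative summary (not part of the device model): each vector is a
 * tiny state machine over (is_pending, is_masked, is_asserted).
 * vmxnet3_trigger_interrupt() sets is_pending and lets
 * vmxnet3_update_interrupt_line_state() resolve the line:
 *
 *     pending && !masked && !asserted  ->  deliver the interrupt
 *     !pending && asserted             ->  deassert the INTx line
 *
 * With auto-masking enabled the vector is masked right after delivery,
 * so no further interrupts fire until the guest driver unmasks it via
 * the IMR registers (vmxnet3_on_interrupt_mask_changed()).
 */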
static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
{
    return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
}

#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
#define VMXNET3_MAKE_BYTE(byte_num, val) \
    (((uint32_t)((val) & 0xFF)) << (byte_num)*8)

static void vmxnet3_set_variable_mac(VMXNET3State *s, uint32_t h, uint32_t l)
{
    s->conf.macaddr.a[0] = VMXNET3_GET_BYTE(l, 0);
    s->conf.macaddr.a[1] = VMXNET3_GET_BYTE(l, 1);
    s->conf.macaddr.a[2] = VMXNET3_GET_BYTE(l, 2);
    s->conf.macaddr.a[3] = VMXNET3_GET_BYTE(l, 3);
    s->conf.macaddr.a[4] = VMXNET3_GET_BYTE(h, 0);
    s->conf.macaddr.a[5] = VMXNET3_GET_BYTE(h, 1);

    VMW_CFPRN("Variable MAC: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static uint64_t vmxnet3_get_mac_low(MACAddr *addr)
{
    return VMXNET3_MAKE_BYTE(0, addr->a[0]) |
           VMXNET3_MAKE_BYTE(1, addr->a[1]) |
           VMXNET3_MAKE_BYTE(2, addr->a[2]) |
           VMXNET3_MAKE_BYTE(3, addr->a[3]);
}

static uint64_t vmxnet3_get_mac_high(MACAddr *addr)
{
    return VMXNET3_MAKE_BYTE(0, addr->a[4]) |
           VMXNET3_MAKE_BYTE(1, addr->a[5]);
}

static inline void
vmxnet3_inc_tx_consumption_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->txq_descr[qidx].tx_ring);
}

static inline void
vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx)
{
    vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]);
}

static inline void
vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring);
}

static inline void
vmxnet3_inc_rx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->rxq_descr[qidx].comp_ring);
}

static inline void
vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_dec(&s->rxq_descr[qidx].comp_ring);
}

static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32 tx_ridx)
{
    struct Vmxnet3_TxCompDesc txcq_descr;

    VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);

    txcq_descr.txdIdx = tx_ridx;
    txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);

    vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);

    /* Flush changes in TX descriptor before changing the counter value */
    smp_wmb();

    vmxnet3_inc_tx_completion_counter(s, qidx);
    vmxnet3_trigger_interrupt(s, s->txq_descr[qidx].intr_idx);
}

static bool
vmxnet3_setup_tx_offloads(VMXNET3State *s)
{
    switch (s->offload_mode) {
    case VMXNET3_OM_NONE:
        vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
        break;

    case VMXNET3_OM_CSUM:
        vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
        VMW_PKPRN("L4 CSO requested");
        break;

    case VMXNET3_OM_TSO:
        vmxnet_tx_pkt_build_vheader(s->tx_pkt, true, true,
                                    s->cso_or_gso_size);
        vmxnet_tx_pkt_update_ip_checksums(s->tx_pkt);
        VMW_PKPRN("GSO offload requested.");
        break;

    default:
        g_assert_not_reached();
        return false;
    }

    return true;
}
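/*
 * Illustrative mapping (not part of the device model): in the vmxnet3
 * interface the TX descriptor 'om' field selects the offload mode and
 * 'msscof' is overloaded accordingly:
 *
 *     VMXNET3_OM_NONE  - no offload
 *     VMXNET3_OM_CSUM  - L4 checksum offload, msscof = checksum offset
 *     VMXNET3_OM_TSO   - TCP segmentation offload, msscof = MSS
 *
 * vmxnet3_tx_retrieve_metadata() below latches om and msscof (as
 * offload_mode/cso_or_gso_size) from the SOP descriptor of each packet.
 */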
static void
vmxnet3_tx_retrieve_metadata(VMXNET3State *s,
                             const struct Vmxnet3_TxDesc *txd)
{
    s->offload_mode = txd->om;
    s->cso_or_gso_size = txd->msscof;
    s->tci = txd->tci;
    s->needs_vlan = txd->ti;
}

typedef enum {
    VMXNET3_PKT_STATUS_OK,
    VMXNET3_PKT_STATUS_ERROR,
    VMXNET3_PKT_STATUS_DISCARD,    /* only for tx */
    VMXNET3_PKT_STATUS_OUT_OF_BUF  /* only for rx */
} Vmxnet3PktStatus;

static void
vmxnet3_on_tx_done_update_stats(VMXNET3State *s, int qidx,
                                Vmxnet3PktStatus status)
{
    size_t tot_len = vmxnet_tx_pkt_get_total_len(s->tx_pkt);
    struct UPT1_TxStats *stats = &s->txq_descr[qidx].txq_stats;

    switch (status) {
    case VMXNET3_PKT_STATUS_OK:
        switch (vmxnet_tx_pkt_get_packet_type(s->tx_pkt)) {
        case ETH_PKT_BCAST:
            stats->bcastPktsTxOK++;
            stats->bcastBytesTxOK += tot_len;
            break;
        case ETH_PKT_MCAST:
            stats->mcastPktsTxOK++;
            stats->mcastBytesTxOK += tot_len;
            break;
        case ETH_PKT_UCAST:
            stats->ucastPktsTxOK++;
            stats->ucastBytesTxOK += tot_len;
            break;
        default:
            g_assert_not_reached();
        }

        if (s->offload_mode == VMXNET3_OM_TSO) {
            /*
             * According to VMWARE headers this statistic is the number
             * of packets after segmentation, but since the QEMU model
             * does not have this information, the best we can do is to
             * report the number of non-segmented packets
             */
            stats->TSOPktsTxOK++;
            stats->TSOBytesTxOK += tot_len;
        }
        break;

    case VMXNET3_PKT_STATUS_DISCARD:
        stats->pktsTxDiscard++;
        break;

    case VMXNET3_PKT_STATUS_ERROR:
        stats->pktsTxError++;
        break;

    default:
        g_assert_not_reached();
    }
}

static void
vmxnet3_on_rx_done_update_stats(VMXNET3State *s,
                                int qidx,
                                Vmxnet3PktStatus status)
{
    struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats;
    size_t tot_len = vmxnet_rx_pkt_get_total_len(s->rx_pkt);

    switch (status) {
    case VMXNET3_PKT_STATUS_OUT_OF_BUF:
        stats->pktsRxOutOfBuf++;
        break;

    case VMXNET3_PKT_STATUS_ERROR:
        stats->pktsRxError++;
        break;

    case VMXNET3_PKT_STATUS_OK:
        switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
        case ETH_PKT_BCAST:
            stats->bcastPktsRxOK++;
            stats->bcastBytesRxOK += tot_len;
            break;
        case ETH_PKT_MCAST:
            stats->mcastPktsRxOK++;
            stats->mcastBytesRxOK += tot_len;
            break;
        case ETH_PKT_UCAST:
            stats->ucastPktsRxOK++;
            stats->ucastBytesRxOK += tot_len;
            break;
        default:
            g_assert_not_reached();
        }

        if (tot_len > s->mtu) {
            stats->LROPktsRxOK++;
            stats->LROBytesRxOK += tot_len;
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static inline bool
vmxnet3_pop_next_tx_descr(VMXNET3State *s,
                          int qidx,
                          struct Vmxnet3_TxDesc *txd,
                          uint32_t *descr_idx)
{
    Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;

    vmxnet3_ring_read_curr_cell(ring, txd);
    if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_ring_read_curr_cell(ring, txd);
        VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
        *descr_idx = vmxnet3_ring_curr_cell_idx(ring);
        vmxnet3_inc_tx_consumption_counter(s, qidx);
        return true;
    }

    return false;
}

static bool
vmxnet3_send_packet(VMXNET3State *s, uint32_t qidx)
{
    Vmxnet3PktStatus status = VMXNET3_PKT_STATUS_OK;

    if (!vmxnet3_setup_tx_offloads(s)) {
        status = VMXNET3_PKT_STATUS_ERROR;
        goto func_exit;
    }

    /* debug prints */
    vmxnet3_dump_virt_hdr(vmxnet_tx_pkt_get_vhdr(s->tx_pkt));
    vmxnet_tx_pkt_dump(s->tx_pkt);

    if (!vmxnet_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) {
        status = VMXNET3_PKT_STATUS_DISCARD;
        goto func_exit;
    }

func_exit:
    vmxnet3_on_tx_done_update_stats(s, qidx, status);
    return (status == VMXNET3_PKT_STATUS_OK);
}
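/*
 * Illustrative note (not part of the device model): a guest transmit
 * request is a run of descriptors on the TX ring, one per fragment,
 * terminated by a descriptor with the 'eop' bit set.  Metadata (offload
 * mode, MSS/checksum offset, VLAN tag) comes from the first (SOP)
 * descriptor, e.g. a packet with a header fragment and two data
 * fragments occupies three descriptors:
 *
 *     [SOP: addr0/len0, om/msscof/ti/tci] [addr1/len1] [addr2/len2, EOP]
 */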
static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
{
    struct Vmxnet3_TxDesc txd;
    uint32_t txd_idx;
    uint32_t data_len;
    hwaddr data_pa;

    for (;;) {
        if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) {
            break;
        }

        vmxnet3_dump_tx_descr(&txd);

        if (!s->skip_current_tx_pkt) {
            data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
            data_pa = le64_to_cpu(txd.addr);

            if (!vmxnet_tx_pkt_add_raw_fragment(s->tx_pkt,
                                                data_pa,
                                                data_len)) {
                s->skip_current_tx_pkt = true;
            }
        }

        if (s->tx_sop) {
            vmxnet3_tx_retrieve_metadata(s, &txd);
            s->tx_sop = false;
        }

        if (txd.eop) {
            if (!s->skip_current_tx_pkt) {
                vmxnet_tx_pkt_parse(s->tx_pkt);

                if (s->needs_vlan) {
                    vmxnet_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci);
                }

                vmxnet3_send_packet(s, qidx);
            } else {
                vmxnet3_on_tx_done_update_stats(s, qidx,
                                                VMXNET3_PKT_STATUS_ERROR);
            }

            vmxnet3_complete_packet(s, qidx, txd_idx);
            s->tx_sop = true;
            s->skip_current_tx_pkt = false;
            vmxnet_tx_pkt_reset(s->tx_pkt);
        }
    }
}

static inline void
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
                           struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
{
    Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
    *didx = vmxnet3_ring_curr_cell_idx(ring);
    vmxnet3_ring_read_curr_cell(ring, dbuf);
}

static inline uint8_t
vmxnet3_get_rx_ring_gen(VMXNET3State *s, int qidx, int ridx)
{
    return s->rxq_descr[qidx].rx_ring[ridx].gen;
}

static inline hwaddr
vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
{
    uint8_t ring_gen;
    struct Vmxnet3_RxCompDesc rxcd;

    hwaddr daddr =
        vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);

    cpu_physical_memory_read(daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
    ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);

    if (rxcd.gen != ring_gen) {
        *descr_gen = ring_gen;
        vmxnet3_inc_rx_completion_counter(s, qidx);
        return daddr;
    }

    return 0;
}

static inline void
vmxnet3_revert_rxc_descr(VMXNET3State *s, int qidx)
{
    vmxnet3_dec_rx_completion_counter(s, qidx);
}

#define RXQ_IDX            (0)
#define RX_HEAD_BODY_RING  (0)
#define RX_BODY_ONLY_RING  (1)
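/*
 * Illustrative note (not part of the device model): each RX queue owns
 * two descriptor rings.  Ring 0 holds HEAD descriptors (and BODY ones),
 * ring 1 holds BODY-only descriptors.  A packet that does not fit into
 * the head buffer continues into body buffers; for guests that do not
 * use compound packets (see rx_packets_compound) every chunk is taken
 * from the head/body ring instead.
 */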
static bool
vmxnet3_get_next_head_rx_descr(VMXNET3State *s,
                               struct Vmxnet3_RxDesc *descr_buf,
                               uint32_t *descr_idx,
                               uint32_t *ridx)
{
    for (;;) {
        uint32_t ring_gen;
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
                                   descr_buf, descr_idx);

        /* If no more free descriptors - return */
        ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING);
        if (descr_buf->gen != ring_gen) {
            return false;
        }

        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
                                   descr_buf, descr_idx);

        /* Mark current descriptor as used/skipped */
        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);

        /* If this is what we are looking for - return */
        if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) {
            *ridx = RX_HEAD_BODY_RING;
            return true;
        }
    }
}

static bool
vmxnet3_get_next_body_rx_descr(VMXNET3State *s,
                               struct Vmxnet3_RxDesc *d,
                               uint32_t *didx,
                               uint32_t *ridx)
{
    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);

    /* Try to find corresponding descriptor in head/body ring */
    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
        if (d->btype == VMXNET3_RXD_BTYPE_BODY) {
            vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
            *ridx = RX_HEAD_BODY_RING;
            return true;
        }
    }

    /*
     * If there are no free descriptors on the head/body ring, or the
     * next free descriptor is a head descriptor, switch to the body
     * only ring
     */
    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);

    /* If no more free descriptors - return */
    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
        assert(d->btype == VMXNET3_RXD_BTYPE_BODY);
        *ridx = RX_BODY_ONLY_RING;
        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING);
        return true;
    }

    return false;
}

static inline bool
vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
                          struct Vmxnet3_RxDesc *descr_buf,
                          uint32_t *descr_idx,
                          uint32_t *ridx)
{
    if (is_head || !s->rx_packets_compound) {
        return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx);
    } else {
        return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx);
    }
}

static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
                                    struct Vmxnet3_RxCompDesc *rxcd)
{
    int csum_ok, is_gso;
    bool isip4, isip6, istcp, isudp;
    struct virtio_net_hdr *vhdr;
    uint8_t offload_type;

    if (vmxnet_rx_pkt_is_vlan_stripped(pkt)) {
        rxcd->ts = 1;
        rxcd->tci = vmxnet_rx_pkt_get_vlan_tag(pkt);
    }

    if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
        goto nocsum;
    }

    vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
    /*
     * The checksum is considered valid either when the lower level
     * says so, or when the lower level requires checksum offload,
     * meaning the packet was produced/bridged locally and did not
     * travel over the network after the last checksum calculation
     * or production
     */
    csum_ok = VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_DATA_VALID) ||
              VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM);

    offload_type = vhdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    is_gso = (offload_type != VIRTIO_NET_HDR_GSO_NONE) ? 1 : 0;

    if (!csum_ok && !is_gso) {
        goto nocsum;
    }

    vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
    if ((!istcp && !isudp) || (!isip4 && !isip6)) {
        goto nocsum;
    }

    rxcd->cnc = 0;
    rxcd->v4 = isip4 ? 1 : 0;
    rxcd->v6 = isip6 ? 1 : 0;
    rxcd->tcp = istcp ? 1 : 0;
    rxcd->udp = isudp ? 1 : 0;
    rxcd->fcs = rxcd->tuc = rxcd->ipc = 1;
    return;

nocsum:
    rxcd->cnc = 1;
    return;
}
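/*
 * Illustrative mapping (not part of the device model): on the RX
 * completion descriptor, cnc == 1 means "checksum not calculated" and
 * the guest must verify checksums itself.  When the packet is trusted,
 * the descriptor advertises what was validated, roughly:
 *
 *     rxcd->v4/v6   - IP version flags
 *     rxcd->tcp/udp - L4 protocol flags
 *     rxcd->ipc/tuc - IP / TCP-UDP checksum validated
 *     rxcd->fcs     - frame checksum OK
 */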
static void
vmxnet3_physical_memory_writev(const struct iovec *iov,
                               size_t start_iov_off,
                               hwaddr target_addr,
                               size_t bytes_to_copy)
{
    size_t curr_off = 0;
    size_t copied = 0;

    while (bytes_to_copy) {
        if (start_iov_off < (curr_off + iov->iov_len)) {
            size_t chunk_len =
                MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy);

            cpu_physical_memory_write(target_addr + copied,
                                      iov->iov_base + start_iov_off - curr_off,
                                      chunk_len);

            copied += chunk_len;
            start_iov_off += chunk_len;
            curr_off = start_iov_off;
            bytes_to_copy -= chunk_len;
        } else {
            curr_off += iov->iov_len;
        }
        iov++;
    }
}

static bool
vmxnet3_indicate_packet(VMXNET3State *s)
{
    struct Vmxnet3_RxDesc rxd;
    bool is_head = true;
    uint32_t rxd_idx;
    uint32_t rx_ridx = 0;

    struct Vmxnet3_RxCompDesc rxcd;
    uint32_t new_rxcd_gen = VMXNET3_INIT_GEN;
    hwaddr new_rxcd_pa = 0;
    hwaddr ready_rxcd_pa = 0;
    struct iovec *data = vmxnet_rx_pkt_get_iovec(s->rx_pkt);
    size_t bytes_copied = 0;
    size_t bytes_left = vmxnet_rx_pkt_get_total_len(s->rx_pkt);
    uint16_t num_frags = 0;
    size_t chunk_size;

    vmxnet_rx_pkt_dump(s->rx_pkt);

    while (bytes_left > 0) {

        /* cannot add more frags to packet */
        if (num_frags == s->max_rx_frags) {
            break;
        }

        new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen);
        if (!new_rxcd_pa) {
            break;
        }

        if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) {
            break;
        }

        chunk_size = MIN(bytes_left, rxd.len);
        vmxnet3_physical_memory_writev(data, bytes_copied,
                                       le64_to_cpu(rxd.addr), chunk_size);
        bytes_copied += chunk_size;
        bytes_left -= chunk_size;

        vmxnet3_dump_rx_descr(&rxd);

        if (0 != ready_rxcd_pa) {
            cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
        }

        memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
        rxcd.rxdIdx = rxd_idx;
        rxcd.len = chunk_size;
        rxcd.sop = is_head;
        rxcd.gen = new_rxcd_gen;
        rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num;

        if (0 == bytes_left) {
            vmxnet3_rx_update_descr(s->rx_pkt, &rxcd);
        }

        VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu "
                  "sop %d csum_correct %lu",
                  (unsigned long) rx_ridx,
                  (unsigned long) rxcd.rxdIdx,
                  (unsigned long) rxcd.len,
                  (int) rxcd.sop,
                  (unsigned long) rxcd.tuc);

        is_head = false;
        ready_rxcd_pa = new_rxcd_pa;
        new_rxcd_pa = 0;
        num_frags++;
    }

    if (0 != ready_rxcd_pa) {
        rxcd.eop = 1;
        rxcd.err = (0 != bytes_left);
        cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));

        /* Flush RX descriptor changes */
        smp_wmb();
    }

    if (0 != new_rxcd_pa) {
        vmxnet3_revert_rxc_descr(s, RXQ_IDX);
    }

    vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx);

    if (bytes_left == 0) {
        vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK);
        return true;
    } else if (num_frags == s->max_rx_frags) {
        vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR);
        return false;
    } else {
        vmxnet3_on_rx_done_update_stats(s, RXQ_IDX,
                                        VMXNET3_PKT_STATUS_OUT_OF_BUF);
        return false;
    }
}
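/*
 * Illustrative note (not part of the device model): completion
 * descriptors are published one step behind the copy loop above.  Each
 * iteration pops the next completion slot but writes out the *previous*
 * one (ready_rxcd_pa); the final descriptor is written after the loop
 * with 'eop' set, and 'err' set if the packet did not fit.  This way
 * the guest never observes a completion chain whose tail is missing.
 */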
static void
vmxnet3_io_bar0_write(void *opaque, hwaddr addr,
                      uint64_t val, unsigned size)
{
    VMXNET3State *s = opaque;

    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD,
                             VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) {
        int tx_queue_idx =
            VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD,
                                     VMXNET3_REG_ALIGN);
        assert(tx_queue_idx < s->txq_num);
        vmxnet3_process_tx_queue(s, tx_queue_idx);
        return;
    }

    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
                             VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) {
        int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR,
                                         VMXNET3_REG_ALIGN);

        VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val);

        vmxnet3_on_interrupt_mask_changed(s, l, val);
        return;
    }

    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD,
                             VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) ||
        VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2,
                             VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) {
        return;
    }

    VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d",
              (uint64_t) addr, val, size);
}

static uint64_t
vmxnet3_io_bar0_read(void *opaque, hwaddr addr, unsigned size)
{
    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
                             VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) {
        g_assert_not_reached();
    }

    VMW_CBPRN("BAR0 unknown read [%" PRIx64 "], size %d", addr, size);
    return 0;
}

static void vmxnet3_reset_interrupt_states(VMXNET3State *s)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(s->interrupt_states); i++) {
        s->interrupt_states[i].is_asserted = false;
        s->interrupt_states[i].is_pending = false;
        s->interrupt_states[i].is_masked = true;
    }
}

static void vmxnet3_reset_mac(VMXNET3State *s)
{
    memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
    VMW_CFPRN("MAC address set to: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
}

static void vmxnet3_deactivate_device(VMXNET3State *s)
{
    VMW_CBPRN("Deactivating vmxnet3...");
    s->device_active = false;
}

static void vmxnet3_reset(VMXNET3State *s)
{
    VMW_CBPRN("Resetting vmxnet3...");

    vmxnet3_deactivate_device(s);
    vmxnet3_reset_interrupt_states(s);
    vmxnet_tx_pkt_reset(s->tx_pkt);
    s->drv_shmem = 0;
    s->tx_sop = true;
    s->skip_current_tx_pkt = false;
}

static void vmxnet3_update_rx_mode(VMXNET3State *s)
{
    s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
                                           devRead.rxFilterConf.rxMode);
    VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
}

static void vmxnet3_update_vlan_filters(VMXNET3State *s)
{
    int i;

    /* Copy configuration from shared memory */
    VMXNET3_READ_DRV_SHARED(s->drv_shmem,
                            devRead.rxFilterConf.vfTable,
                            s->vlan_table,
                            sizeof(s->vlan_table));

    /* Invert byte order when needed */
    for (i = 0; i < ARRAY_SIZE(s->vlan_table); i++) {
        s->vlan_table[i] = le32_to_cpu(s->vlan_table[i]);
    }

    /* Dump configuration for debugging purposes */
    VMW_CFPRN("Configured VLANs:");
    for (i = 0; i < sizeof(s->vlan_table) * 8; i++) {
        if (VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, i)) {
            VMW_CFPRN("\tVLAN %d is present", i);
        }
    }
}
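/*
 * Illustrative note (not part of the device model): the VLAN filter
 * table is a bitmap with one bit per possible VLAN id (4096 bits in
 * VMXNET3_VFT_SIZE 32-bit words); bit N set means VLAN id N may be
 * indicated to the guest, which is what VMXNET3_VFTABLE_ENTRY_IS_SET()
 * checks in the RX filter path below.
 */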
static void vmxnet3_update_mcast_filters(VMXNET3State *s)
{
    uint16_t list_bytes =
        VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
                                  devRead.rxFilterConf.mfTableLen);

    s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);

    s->mcast_list = g_realloc(s->mcast_list, list_bytes);
    if (NULL == s->mcast_list) {
        if (0 == s->mcast_list_len) {
            VMW_CFPRN("Current multicast list is empty");
        } else {
            VMW_ERPRN("Failed to allocate multicast list of %d elements",
                      s->mcast_list_len);
        }
        s->mcast_list_len = 0;
    } else {
        int i;
        hwaddr mcast_list_pa =
            VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
                                      devRead.rxFilterConf.mfTablePA);

        cpu_physical_memory_read(mcast_list_pa, s->mcast_list, list_bytes);
        VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
        for (i = 0; i < s->mcast_list_len; i++) {
            VMW_CFPRN("\t" VMXNET_MF, VMXNET_MA(s->mcast_list[i].a));
        }
    }
}

static void vmxnet3_setup_rx_filtering(VMXNET3State *s)
{
    vmxnet3_update_rx_mode(s);
    vmxnet3_update_vlan_filters(s);
    vmxnet3_update_mcast_filters(s);
}
static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
{
    uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2);
    VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode);
    return interrupt_mode;
}

static void vmxnet3_fill_stats(VMXNET3State *s)
{
    int i;
    for (i = 0; i < s->txq_num; i++) {
        cpu_physical_memory_write(s->txq_descr[i].tx_stats_pa,
                                  &s->txq_descr[i].txq_stats,
                                  sizeof(s->txq_descr[i].txq_stats));
    }

    for (i = 0; i < s->rxq_num; i++) {
        cpu_physical_memory_write(s->rxq_descr[i].rx_stats_pa,
                                  &s->rxq_descr[i].rxq_stats,
                                  sizeof(s->rxq_descr[i].rxq_stats));
    }
}

static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
{
    struct Vmxnet3_GOSInfo gos;

    VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
                            &gos, sizeof(gos));
    s->rx_packets_compound = (gos.gosType != VMXNET3_GOS_TYPE_WIN);

    VMW_CFPRN("Guest type specifics: RXCOMPOUND: %d", s->rx_packets_compound);
}

static void
vmxnet3_dump_conf_descr(const char *name,
                        struct Vmxnet3_VariableLenConfDesc *pm_descr)
{
    VMW_CFPRN("%s descriptor dump: Version %u, Length %u",
              name, pm_descr->confVer, pm_descr->confLen);
}

static void vmxnet3_update_pm_state(VMXNET3State *s)
{
    struct Vmxnet3_VariableLenConfDesc pm_descr;

    pm_descr.confLen =
        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
    pm_descr.confVer =
        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
    pm_descr.confPA =
        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);

    vmxnet3_dump_conf_descr("PM State", &pm_descr);
}

static void vmxnet3_update_features(VMXNET3State *s)
{
    uint32_t guest_features;
    int rxcso_supported;

    guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
                                               devRead.misc.uptFeatures);

    rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
    s->rx_vlan_stripping = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXVLAN);
    s->lro_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_LRO);

    VMW_CFPRN("Features configuration: LRO: %d, RXCSUM: %d, VLANSTRIP: %d",
              s->lro_supported, rxcso_supported,
              s->rx_vlan_stripping);
    if (s->peer_has_vhdr) {
        tap_set_offload(qemu_get_queue(s->nic)->peer,
                        rxcso_supported,
                        s->lro_supported,
                        s->lro_supported,
                        0,
                        0);
    }
}

static bool vmxnet3_verify_intx(VMXNET3State *s, int intx)
{
    return s->msix_used || s->msi_used || (intx ==
           (pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1));
}

static void vmxnet3_activate_device(VMXNET3State *s)
{
    int i;
    static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
    hwaddr qdescr_table_pa;
    uint64_t pa;
    uint32_t size;

    /* Verify configuration consistency */
    if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
        VMW_ERPRN("Device configuration received from driver is invalid");
        return;
    }

    vmxnet3_adjust_by_guest_type(s);
    vmxnet3_update_features(s);
    vmxnet3_update_pm_state(s);
    vmxnet3_setup_rx_filtering(s);
    /* Cache fields from shared memory */
    s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
    VMW_CFPRN("MTU is %u", s->mtu);

    s->max_rx_frags =
        VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);

    if (s->max_rx_frags == 0) {
        s->max_rx_frags = 1;
    }

    VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);

    s->event_int_idx =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
    assert(vmxnet3_verify_intx(s, s->event_int_idx));
    VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);

    s->auto_int_masking =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
    VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);

    s->txq_num =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
    s->rxq_num =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);

    VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
    assert(s->txq_num <= VMXNET3_DEVICE_MAX_TX_QUEUES);
    assert(s->rxq_num <= VMXNET3_DEVICE_MAX_RX_QUEUES);
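    /*
     * Illustrative layout (not part of the device model): the queue
     * descriptor table at devRead.misc.queueDescPA is an array of
     * s->txq_num Vmxnet3_TxQueueDesc entries followed immediately by
     * s->rxq_num Vmxnet3_RxQueueDesc entries, which is how the
     * qdescr_pa / qd_pa addresses below are computed.
     */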
    qdescr_table_pa =
        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
    VMW_CFPRN("Queue descriptors table is at 0x%" PRIx64, qdescr_table_pa);

    /*
     * Worst-case scenario is a packet that holds all TX rings space so
     * we calculate total size of all TX rings for max TX fragments number
     */
    s->max_tx_frags = 0;

    /* TX queues */
    for (i = 0; i < s->txq_num; i++) {
        hwaddr qdescr_pa =
            qdescr_table_pa + i * sizeof(struct Vmxnet3_TxQueueDesc);

        /* Read interrupt number for this TX queue */
        s->txq_descr[i].intr_idx =
            VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
        assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));

        VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);

        /* Read rings memory locations for TX queues */
        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);

        vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
                          sizeof(struct Vmxnet3_TxDesc), false);
        VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);

        s->max_tx_frags += size;

        /* TXC ring */
        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
        vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
                          sizeof(struct Vmxnet3_TxCompDesc), true);
        VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);

        s->txq_descr[i].tx_stats_pa =
            qdescr_pa + offsetof(struct Vmxnet3_TxQueueDesc, stats);

        memset(&s->txq_descr[i].txq_stats, 0,
               sizeof(s->txq_descr[i].txq_stats));

        /* Fill device-managed parameters for queues */
        VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
                                       ctrl.txThreshold,
                                       VMXNET3_DEF_TX_THRESHOLD);
    }

    /* Preallocate TX packet wrapper */
    VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);

    /* Read rings memory locations for RX queues */
    for (i = 0; i < s->rxq_num; i++) {
        int j;
        hwaddr qd_pa =
            qdescr_table_pa + s->txq_num * sizeof(struct Vmxnet3_TxQueueDesc) +
            i * sizeof(struct Vmxnet3_RxQueueDesc);

        /*
         * Read interrupt number for this RX queue.  conf.intrIdx sits
         * at the same offset in TX and RX queue descriptors, so the TX
         * accessor is reused here.
         */
        s->rxq_descr[i].intr_idx =
            VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
        assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));

        VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);

        /* Read rings memory locations */
        for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
            /* RX rings */
            pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
            size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
            vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
                              sizeof(struct Vmxnet3_RxDesc), false);
            VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
                      i, j, pa, size);
        }

        /* RXC ring */
        pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
        size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
        vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
                          sizeof(struct Vmxnet3_RxCompDesc), true);
        VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);

        s->rxq_descr[i].rx_stats_pa =
            qd_pa + offsetof(struct Vmxnet3_RxQueueDesc, stats);
        memset(&s->rxq_descr[i].rxq_stats, 0,
               sizeof(s->rxq_descr[i].rxq_stats));
    }
    /* Make sure everything is in place before device activation */
    smp_wmb();

    vmxnet3_reset_mac(s);

    s->device_active = true;
}

static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd)
{
    s->last_command = cmd;

    switch (cmd) {
    case VMXNET3_CMD_GET_PERM_MAC_HI:
        VMW_CBPRN("Set: Get upper part of permanent MAC");
        break;

    case VMXNET3_CMD_GET_PERM_MAC_LO:
        VMW_CBPRN("Set: Get lower part of permanent MAC");
        break;

    case VMXNET3_CMD_GET_STATS:
        VMW_CBPRN("Set: Get device statistics");
        vmxnet3_fill_stats(s);
        break;

    case VMXNET3_CMD_ACTIVATE_DEV:
        VMW_CBPRN("Set: Activating vmxnet3 device");
        vmxnet3_activate_device(s);
        break;

    case VMXNET3_CMD_UPDATE_RX_MODE:
        VMW_CBPRN("Set: Update rx mode");
        vmxnet3_update_rx_mode(s);
        break;

    case VMXNET3_CMD_UPDATE_VLAN_FILTERS:
        VMW_CBPRN("Set: Update VLAN filters");
        vmxnet3_update_vlan_filters(s);
        break;

    case VMXNET3_CMD_UPDATE_MAC_FILTERS:
        VMW_CBPRN("Set: Update MAC filters");
        vmxnet3_update_mcast_filters(s);
        break;

    case VMXNET3_CMD_UPDATE_FEATURE:
        VMW_CBPRN("Set: Update features");
        vmxnet3_update_features(s);
        break;

    case VMXNET3_CMD_UPDATE_PMCFG:
        VMW_CBPRN("Set: Update power management config");
        vmxnet3_update_pm_state(s);
        break;

    case VMXNET3_CMD_GET_LINK:
        VMW_CBPRN("Set: Get link");
        break;

    case VMXNET3_CMD_RESET_DEV:
        VMW_CBPRN("Set: Reset device");
        vmxnet3_reset(s);
        break;

    case VMXNET3_CMD_QUIESCE_DEV:
        VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - pause the device");
        vmxnet3_deactivate_device(s);
        break;

    case VMXNET3_CMD_GET_CONF_INTR:
        VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration");
        break;

    default:
        VMW_CBPRN("Received unknown command: %" PRIx64, cmd);
        break;
    }
}
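/*
 * Illustrative note (not part of the device model): the command
 * interface is a single BAR1 register.  The guest writes a command code
 * to VMXNET3_REG_CMD (handled above) and then reads the same register
 * back; the read is served by vmxnet3_get_command_status() below, which
 * dispatches on s->last_command.
 */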
static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
{
    uint64_t ret;

    switch (s->last_command) {
    case VMXNET3_CMD_ACTIVATE_DEV:
        ret = (s->device_active) ? 0 : -1;
        VMW_CFPRN("Device active: %" PRIx64, ret);
        break;

    case VMXNET3_CMD_RESET_DEV:
    case VMXNET3_CMD_QUIESCE_DEV:
    case VMXNET3_CMD_GET_QUEUE_STATUS:
        ret = 0;
        break;

    case VMXNET3_CMD_GET_LINK:
        ret = s->link_status_and_speed;
        VMW_CFPRN("Link and speed: %" PRIx64, ret);
        break;

    case VMXNET3_CMD_GET_PERM_MAC_LO:
        ret = vmxnet3_get_mac_low(&s->perm_mac);
        break;

    case VMXNET3_CMD_GET_PERM_MAC_HI:
        ret = vmxnet3_get_mac_high(&s->perm_mac);
        break;

    case VMXNET3_CMD_GET_CONF_INTR:
        ret = vmxnet3_get_interrupt_config(s);
        break;

    default:
        VMW_WRPRN("Received request for unknown command: %x", s->last_command);
        ret = -1;
        break;
    }

    return ret;
}

static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
{
    uint32_t events;

    VMW_CBPRN("Setting events: 0x%x", val);
    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
}

static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
{
    uint32_t events;

    VMW_CBPRN("Clearing events: 0x%x", val);
    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
}

static void
vmxnet3_io_bar1_write(void *opaque,
                      hwaddr addr,
                      uint64_t val,
                      unsigned size)
{
    VMXNET3State *s = opaque;

    switch (addr) {
    /* Vmxnet3 Revision Report Selection */
    case VMXNET3_REG_VRRS:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d",
                  val, size);
        break;

    /* UPT Version Report Selection */
    case VMXNET3_REG_UVRS:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d",
                  val, size);
        break;

    /* Driver Shared Address Low */
    case VMXNET3_REG_DSAL:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d",
                  val, size);
        /*
         * The guest driver first writes the low part of the shared
         * memory address.  We save it to a temp variable and set the
         * shared address only after we get the high part
         */
        if (0 == val) {
            s->device_active = false;
        }
        s->temp_shared_guest_driver_memory = val;
        s->drv_shmem = 0;
        break;
    /* Driver Shared Address High */
    case VMXNET3_REG_DSAH:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d",
                  val, size);
        /*
         * Set the shared memory between guest driver and device.
         * We should already have the low address part.
         */
        s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
        break;

    /* Command */
    case VMXNET3_REG_CMD:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d",
                  val, size);
        vmxnet3_handle_command(s, val);
        break;

    /* MAC Address Low */
    case VMXNET3_REG_MACL:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d",
                  val, size);
        s->temp_mac = val;
        break;

    /* MAC Address High */
    case VMXNET3_REG_MACH:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d",
                  val, size);
        vmxnet3_set_variable_mac(s, val, s->temp_mac);
        break;

    /* Interrupt Cause Register */
    case VMXNET3_REG_ICR:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d",
                  val, size);
        g_assert_not_reached();
        break;

    /* Event Cause Register */
    case VMXNET3_REG_ECR:
        VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d",
                  val, size);
        vmxnet3_ack_events(s, val);
        break;

    default:
        VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d",
                  addr, val, size);
        break;
    }
}

static uint64_t
vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size)
{
    VMXNET3State *s = opaque;
    uint64_t ret = 0;

    switch (addr) {
    /* Vmxnet3 Revision Report Selection */
    case VMXNET3_REG_VRRS:
        VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d", size);
        ret = VMXNET3_DEVICE_REVISION;
        break;

    /* UPT Version Report Selection */
    case VMXNET3_REG_UVRS:
        VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d", size);
        ret = VMXNET3_DEVICE_VERSION;
        break;

    /* Command */
    case VMXNET3_REG_CMD:
        VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d", size);
        ret = vmxnet3_get_command_status(s);
        break;

    /* MAC Address Low */
    case VMXNET3_REG_MACL:
        VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d", size);
        ret = vmxnet3_get_mac_low(&s->conf.macaddr);
        break;

    /* MAC Address High */
    case VMXNET3_REG_MACH:
        VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d", size);
        ret = vmxnet3_get_mac_high(&s->conf.macaddr);
        break;

    /*
     * Interrupt Cause Register
     * Used for legacy interrupts only so interrupt index always 0
     */
    case VMXNET3_REG_ICR:
        VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d", size);
        if (vmxnet3_interrupt_asserted(s, 0)) {
            vmxnet3_clear_interrupt(s, 0);
            ret = true;
        } else {
            ret = false;
        }
        break;

    default:
        VMW_CBPRN("Unknown read BAR1 [%" PRIx64 "], %d bytes", addr, size);
        break;
    }

    return ret;
}

static int
vmxnet3_can_receive(NetClientState *nc)
{
    VMXNET3State *s = qemu_get_nic_opaque(nc);
    return s->device_active &&
           VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP);
}

static inline bool
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
{
    uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
    if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
        return true;
    }

    return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
}

static bool
vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac)
{
    int i;
    for (i = 0; i < s->mcast_list_len; i++) {
        if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) {
            return true;
        }
    }
    return false;
}
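/*
 * Illustrative precedence (not part of the device model) for the filter
 * below: promiscuous mode accepts everything; otherwise the VLAN filter
 * runs first, then the per-type check (unicast must match our MAC,
 * broadcast/multicast must be enabled in rx_mode, and multicast must
 * also appear in the guest-programmed list unless ALL_MULTI is set).
 */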
static bool
vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data,
                               size_t size)
{
    struct eth_header *ehdr = PKT_GET_ETH_HDR(data);

    if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_PROMISC)) {
        return true;
    }

    if (!vmxnet3_is_registered_vlan(s, data)) {
        return false;
    }

    switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
    case ETH_PKT_UCAST:
        if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) {
            return false;
        }
        if (memcmp(s->conf.macaddr.a, ehdr->h_dest, ETH_ALEN)) {
            return false;
        }
        break;

    case ETH_PKT_BCAST:
        if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_BCAST)) {
            return false;
        }
        break;

    case ETH_PKT_MCAST:
        if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_ALL_MULTI)) {
            return true;
        }
        if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_MCAST)) {
            return false;
        }
        if (!vmxnet3_is_allowed_mcast_group(s, ehdr->h_dest)) {
            return false;
        }
        break;

    default:
        g_assert_not_reached();
    }

    return true;
}

static ssize_t
vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VMXNET3State *s = qemu_get_nic_opaque(nc);
    size_t bytes_indicated;

    if (!vmxnet3_can_receive(nc)) {
        VMW_PKPRN("Cannot receive now");
        return -1;
    }

    if (s->peer_has_vhdr) {
        vmxnet_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf);
        buf += sizeof(struct virtio_net_hdr);
        size -= sizeof(struct virtio_net_hdr);
    }

    vmxnet_rx_pkt_set_packet_type(s->rx_pkt,
                                  get_eth_packet_type(PKT_GET_ETH_HDR(buf)));

    if (vmxnet3_rx_filter_may_indicate(s, buf, size)) {
        vmxnet_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping);
        bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1;
        if (bytes_indicated < size) {
            VMW_PKPRN("RX: %zu of %zu bytes indicated", bytes_indicated, size);
        }
    } else {
        VMW_PKPRN("Packet dropped by RX filter");
        bytes_indicated = size;
    }

    assert(size > 0);
    assert(bytes_indicated != 0);
    return bytes_indicated;
}

static void vmxnet3_cleanup(NetClientState *nc)
{
    VMXNET3State *s = qemu_get_nic_opaque(nc);
    s->nic = NULL;
}

static void vmxnet3_set_link_status(NetClientState *nc)
{
    VMXNET3State *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->link_status_and_speed &= ~VMXNET3_LINK_STATUS_UP;
    } else {
        s->link_status_and_speed |= VMXNET3_LINK_STATUS_UP;
    }

    vmxnet3_set_events(s, VMXNET3_ECR_LINK);
    vmxnet3_trigger_interrupt(s, s->event_int_idx);
}
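/*
 * Illustrative note (not part of the device model): link changes are
 * reported as events.  vmxnet3_set_events() sets VMXNET3_ECR_LINK in
 * the shared-memory ecr field and the events interrupt vector fires;
 * the guest acknowledges by writing the bit back to VMXNET3_REG_ECR
 * (vmxnet3_ack_events()) and can re-read the status/speed word with
 * VMXNET3_CMD_GET_LINK.
 */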
static NetClientInfo net_vmxnet3_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = vmxnet3_can_receive,
    .receive = vmxnet3_receive,
    .cleanup = vmxnet3_cleanup,
    .link_status_changed = vmxnet3_set_link_status,
};

static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
{
    NetClientState *peer = qemu_get_queue(s->nic)->peer;

    if ((NULL != peer) &&
        (peer->info->type == NET_CLIENT_OPTIONS_KIND_TAP) &&
        tap_has_vnet_hdr(peer)) {
        return true;
    }

    VMW_WRPRN("Peer has no virtio extension. Task offloads will be emulated.");
    return false;
}

static void vmxnet3_net_uninit(VMXNET3State *s)
{
    g_free(s->mcast_list);
    vmxnet_tx_pkt_reset(s->tx_pkt);
    vmxnet_tx_pkt_uninit(s->tx_pkt);
    vmxnet_rx_pkt_uninit(s->rx_pkt);
    qemu_del_nic(s->nic);
}

static void vmxnet3_net_init(VMXNET3State *s)
{
    DeviceState *d = DEVICE(s);

    VMW_CBPRN("vmxnet3_net_init called...");

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    /* Windows guest will query the address that was set on init */
    memcpy(&s->perm_mac.a, &s->conf.macaddr.a, sizeof(s->perm_mac.a));

    s->mcast_list = NULL;
    s->mcast_list_len = 0;

    s->link_status_and_speed = VMXNET3_LINK_SPEED | VMXNET3_LINK_STATUS_UP;

    VMW_CFPRN("Permanent MAC: " MAC_FMT, MAC_ARG(s->perm_mac.a));

    s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf,
                          object_get_typename(OBJECT(s)),
                          d->id, s);

    s->peer_has_vhdr = vmxnet3_peer_has_vnet_hdr(s);
    s->tx_sop = true;
    s->skip_current_tx_pkt = false;
    s->tx_pkt = NULL;
    s->rx_pkt = NULL;
    s->rx_vlan_stripping = false;
    s->lro_supported = false;

    if (s->peer_has_vhdr) {
        tap_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
                             sizeof(struct virtio_net_hdr));

        tap_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
    }

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void
vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors)
{
    PCIDevice *d = PCI_DEVICE(s);
    int i;
    for (i = 0; i < num_vectors; i++) {
        msix_vector_unuse(d, i);
    }
}

static bool
vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors)
{
    PCIDevice *d = PCI_DEVICE(s);
    int i;
    for (i = 0; i < num_vectors; i++) {
        int res = msix_vector_use(d, i);
        if (0 > res) {
            VMW_WRPRN("Failed to use MSI-X vector %d, error %d", i, res);
            vmxnet3_unuse_msix_vectors(s, i);
            return false;
        }
    }
    return true;
}

static bool
vmxnet3_init_msix(VMXNET3State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int res = msix_init(d, VMXNET3_MAX_INTRS,
                        &s->msix_bar,
                        VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE,
                        &s->msix_bar,
                        VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA,
                        0);

    if (0 > res) {
        VMW_WRPRN("Failed to initialize MSI-X, error %d", res);
        s->msix_used = false;
    } else {
        if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
            VMW_WRPRN("Failed to use MSI-X vectors");
            msix_uninit(d, &s->msix_bar, &s->msix_bar);
            s->msix_used = false;
        } else {
            s->msix_used = true;
        }
    }
    return s->msix_used;
}

static void
vmxnet3_cleanup_msix(VMXNET3State *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msix_used) {
        vmxnet3_unuse_msix_vectors(s, VMXNET3_MAX_INTRS);
        msix_uninit(d, &s->msix_bar, &s->msix_bar);
    }
}
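/*
 * Illustrative note (not part of the device model): the MSI-X table and
 * PBA share the dedicated MSIX BAR (VMXNET3_MSIX_BAR_IDX) at offsets
 * VMXNET3_OFF_MSIX_TABLE (0x000) and VMXNET3_OFF_MSIX_PBA (0x800),
 * which is why msix_init() above is passed the same memory region for
 * both the table and the PBA.
 */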
#define VMXNET3_MSI_NUM_VECTORS   (1)
#define VMXNET3_MSI_OFFSET        (0x50)
#define VMXNET3_USE_64BIT         (true)
#define VMXNET3_PER_VECTOR_MASK   (false)

static bool
vmxnet3_init_msi(VMXNET3State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int res;

    res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MSI_NUM_VECTORS,
                   VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK);
    if (0 > res) {
        VMW_WRPRN("Failed to initialize MSI, error %d", res);
        s->msi_used = false;
    } else {
        s->msi_used = true;
    }

    return s->msi_used;
}

static void
vmxnet3_cleanup_msi(VMXNET3State *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msi_used) {
        msi_uninit(d);
    }
}

static void
vmxnet3_msix_save(QEMUFile *f, void *opaque)
{
    PCIDevice *d = PCI_DEVICE(opaque);
    msix_save(d, f);
}

static int
vmxnet3_msix_load(QEMUFile *f, void *opaque, int version_id)
{
    PCIDevice *d = PCI_DEVICE(opaque);
    msix_load(d, f);
    return 0;
}

static const MemoryRegionOps b0_ops = {
    .read = vmxnet3_io_bar0_read,
    .write = vmxnet3_io_bar0_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps b1_ops = {
    .read = vmxnet3_io_bar1_read,
    .write = vmxnet3_io_bar1_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static int vmxnet3_pci_init(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    VMXNET3State *s = VMXNET3(pci_dev);

    VMW_CBPRN("Starting init...");

    memory_region_init_io(&s->bar0, OBJECT(s), &b0_ops, s,
                          "vmxnet3-b0", VMXNET3_PT_REG_SIZE);
    pci_register_bar(pci_dev, VMXNET3_BAR0_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0);

    memory_region_init_io(&s->bar1, OBJECT(s), &b1_ops, s,
                          "vmxnet3-b1", VMXNET3_VD_REG_SIZE);
    pci_register_bar(pci_dev, VMXNET3_BAR1_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1);

    memory_region_init(&s->msix_bar, OBJECT(s), "vmxnet3-msix-bar",
                       VMXNET3_MSIX_BAR_SIZE);
    pci_register_bar(pci_dev, VMXNET3_MSIX_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar);

    vmxnet3_reset_interrupt_states(s);

    /* Interrupt pin A */
    pci_dev->config[PCI_INTERRUPT_PIN] = 0x01;

    if (!vmxnet3_init_msix(s)) {
        VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent.");
    }

    if (!vmxnet3_init_msi(s)) {
        VMW_WRPRN("Failed to initialize MSI, configuration is inconsistent.");
    }

    vmxnet3_net_init(s);

    register_savevm(dev, "vmxnet3-msix", -1, 1,
                    vmxnet3_msix_save, vmxnet3_msix_load, s);

    add_boot_device_path(s->conf.bootindex, dev, "/ethernet-phy@0");

    return 0;
}

static void vmxnet3_pci_uninit(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    VMXNET3State *s = VMXNET3(pci_dev);

    VMW_CBPRN("Starting uninit...");

    unregister_savevm(dev, "vmxnet3-msix", s);

    vmxnet3_net_uninit(s);

    vmxnet3_cleanup_msix(s);

    vmxnet3_cleanup_msi(s);

    memory_region_destroy(&s->bar0);
    memory_region_destroy(&s->bar1);
    memory_region_destroy(&s->msix_bar);
}

static void vmxnet3_qdev_reset(DeviceState *dev)
{
    PCIDevice *d = PCI_DEVICE(dev);
    VMXNET3State *s = VMXNET3(d);

    VMW_CBPRN("Starting QDEV reset...");
    vmxnet3_reset(s);
}

static bool vmxnet3_mc_list_needed(void *opaque)
{
    return true;
}

static int vmxnet3_mcast_list_pre_load(void *opaque)
{
    VMXNET3State *s = opaque;

    s->mcast_list = g_malloc(s->mcast_list_buff_size);

    return 0;
}

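/*
 * The multicast list migrates as a variable-sized buffer:
 * vmxnet3_pre_save() records its byte size in mcast_list_buff_size
 * (itself a migrated field in the main section), and
 * vmxnet3_mcast_list_pre_load() above allocates the destination
 * buffer from that value before VMSTATE_VBUFFER_UINT32 reads the
 * contents in.
 */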
static void vmxnet3_pre_save(void *opaque)
{
    VMXNET3State *s = opaque;

    s->mcast_list_buff_size = s->mcast_list_len * sizeof(MACAddr);
}

static const VMStateDescription vmstate_vmxnet3_mcast_list = {
    .name = "vmxnet3/mcast_list",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_load = vmxnet3_mcast_list_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 0,
                               mcast_list_buff_size),
        VMSTATE_END_OF_LIST()
    }
};

static void vmxnet3_get_ring_from_file(QEMUFile *f, Vmxnet3Ring *r)
{
    r->pa = qemu_get_be64(f);
    r->size = qemu_get_be32(f);
    r->cell_size = qemu_get_be32(f);
    r->next = qemu_get_be32(f);
    r->gen = qemu_get_byte(f);
}

static void vmxnet3_put_ring_to_file(QEMUFile *f, Vmxnet3Ring *r)
{
    qemu_put_be64(f, r->pa);
    qemu_put_be32(f, r->size);
    qemu_put_be32(f, r->cell_size);
    qemu_put_be32(f, r->next);
    qemu_put_byte(f, r->gen);
}

static void vmxnet3_get_tx_stats_from_file(QEMUFile *f,
                                           struct UPT1_TxStats *tx_stat)
{
    tx_stat->TSOPktsTxOK = qemu_get_be64(f);
    tx_stat->TSOBytesTxOK = qemu_get_be64(f);
    tx_stat->ucastPktsTxOK = qemu_get_be64(f);
    tx_stat->ucastBytesTxOK = qemu_get_be64(f);
    tx_stat->mcastPktsTxOK = qemu_get_be64(f);
    tx_stat->mcastBytesTxOK = qemu_get_be64(f);
    tx_stat->bcastPktsTxOK = qemu_get_be64(f);
    tx_stat->bcastBytesTxOK = qemu_get_be64(f);
    tx_stat->pktsTxError = qemu_get_be64(f);
    tx_stat->pktsTxDiscard = qemu_get_be64(f);
}

static void vmxnet3_put_tx_stats_to_file(QEMUFile *f,
                                         struct UPT1_TxStats *tx_stat)
{
    qemu_put_be64(f, tx_stat->TSOPktsTxOK);
    qemu_put_be64(f, tx_stat->TSOBytesTxOK);
    qemu_put_be64(f, tx_stat->ucastPktsTxOK);
    qemu_put_be64(f, tx_stat->ucastBytesTxOK);
    qemu_put_be64(f, tx_stat->mcastPktsTxOK);
    qemu_put_be64(f, tx_stat->mcastBytesTxOK);
    qemu_put_be64(f, tx_stat->bcastPktsTxOK);
    qemu_put_be64(f, tx_stat->bcastBytesTxOK);
    qemu_put_be64(f, tx_stat->pktsTxError);
    qemu_put_be64(f, tx_stat->pktsTxDiscard);
}

static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3TxqDescr *r = pv;

    vmxnet3_get_ring_from_file(f, &r->tx_ring);
    vmxnet3_get_ring_from_file(f, &r->comp_ring);
    r->intr_idx = qemu_get_byte(f);
    r->tx_stats_pa = qemu_get_be64(f);

    vmxnet3_get_tx_stats_from_file(f, &r->txq_stats);

    return 0;
}

static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3TxqDescr *r = pv;

    vmxnet3_put_ring_to_file(f, &r->tx_ring);
    vmxnet3_put_ring_to_file(f, &r->comp_ring);
    qemu_put_byte(f, r->intr_idx);
    qemu_put_be64(f, r->tx_stats_pa);
    vmxnet3_put_tx_stats_to_file(f, &r->txq_stats);
}

const VMStateInfo txq_descr_info = {
    .name = "txq_descr",
    .get = vmxnet3_get_txq_descr,
    .put = vmxnet3_put_txq_descr
};

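/*
 * RX statistics are serialized field by field in big-endian order,
 * mirroring the TX side above.  Each get/put pair must stay in sync,
 * since the VMState core treats every queue descriptor as opaque data
 * behind the custom VMStateInfo handlers.
 */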
static void vmxnet3_get_rx_stats_from_file(QEMUFile *f,
                                           struct UPT1_RxStats *rx_stat)
{
    rx_stat->LROPktsRxOK = qemu_get_be64(f);
    rx_stat->LROBytesRxOK = qemu_get_be64(f);
    rx_stat->ucastPktsRxOK = qemu_get_be64(f);
    rx_stat->ucastBytesRxOK = qemu_get_be64(f);
    rx_stat->mcastPktsRxOK = qemu_get_be64(f);
    rx_stat->mcastBytesRxOK = qemu_get_be64(f);
    rx_stat->bcastPktsRxOK = qemu_get_be64(f);
    rx_stat->bcastBytesRxOK = qemu_get_be64(f);
    rx_stat->pktsRxOutOfBuf = qemu_get_be64(f);
    rx_stat->pktsRxError = qemu_get_be64(f);
}

static void vmxnet3_put_rx_stats_to_file(QEMUFile *f,
                                         struct UPT1_RxStats *rx_stat)
{
    qemu_put_be64(f, rx_stat->LROPktsRxOK);
    qemu_put_be64(f, rx_stat->LROBytesRxOK);
    qemu_put_be64(f, rx_stat->ucastPktsRxOK);
    qemu_put_be64(f, rx_stat->ucastBytesRxOK);
    qemu_put_be64(f, rx_stat->mcastPktsRxOK);
    qemu_put_be64(f, rx_stat->mcastBytesRxOK);
    qemu_put_be64(f, rx_stat->bcastPktsRxOK);
    qemu_put_be64(f, rx_stat->bcastBytesRxOK);
    qemu_put_be64(f, rx_stat->pktsRxOutOfBuf);
    qemu_put_be64(f, rx_stat->pktsRxError);
}

static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3RxqDescr *r = pv;
    int i;

    for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
        vmxnet3_get_ring_from_file(f, &r->rx_ring[i]);
    }

    vmxnet3_get_ring_from_file(f, &r->comp_ring);
    r->intr_idx = qemu_get_byte(f);
    r->rx_stats_pa = qemu_get_be64(f);

    vmxnet3_get_rx_stats_from_file(f, &r->rxq_stats);

    return 0;
}

static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3RxqDescr *r = pv;
    int i;

    for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
        vmxnet3_put_ring_to_file(f, &r->rx_ring[i]);
    }

    vmxnet3_put_ring_to_file(f, &r->comp_ring);
    qemu_put_byte(f, r->intr_idx);
    qemu_put_be64(f, r->rx_stats_pa);
    vmxnet3_put_rx_stats_to_file(f, &r->rxq_stats);
}

static int vmxnet3_post_load(void *opaque, int version_id)
{
    VMXNET3State *s = opaque;
    PCIDevice *d = PCI_DEVICE(s);

    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);

    if (s->msix_used) {
        if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
            VMW_WRPRN("Failed to re-use MSI-X vectors");
            msix_uninit(d, &s->msix_bar, &s->msix_bar);
            s->msix_used = false;
            return -1;
        }
    }

    return 0;
}

const VMStateInfo rxq_descr_info = {
    .name = "rxq_descr",
    .get = vmxnet3_get_rxq_descr,
    .put = vmxnet3_put_rxq_descr
};

static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3IntState *r = pv;

    r->is_masked = qemu_get_byte(f);
    r->is_pending = qemu_get_byte(f);
    r->is_asserted = qemu_get_byte(f);

    return 0;
}

static void vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size)
{
    Vmxnet3IntState *r = pv;

    qemu_put_byte(f, r->is_masked);
    qemu_put_byte(f, r->is_pending);
    qemu_put_byte(f, r->is_asserted);
}

const VMStateInfo int_state_info = {
    .name = "int_state",
    .get = vmxnet3_get_int_state,
    .put = vmxnet3_put_int_state
};

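/*
 * Top-level migration description.  Queue descriptors and per-vector
 * interrupt state go through the custom VMStateInfo handlers defined
 * above; the remaining device registers and flags are plain VMState
 * fields.
 */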
static const VMStateDescription vmstate_vmxnet3 = {
    .name = "vmxnet3",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = vmxnet3_pre_save,
    .post_load = vmxnet3_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State),
        VMSTATE_BOOL(rx_packets_compound, VMXNET3State),
        VMSTATE_BOOL(rx_vlan_stripping, VMXNET3State),
        VMSTATE_BOOL(lro_supported, VMXNET3State),
        VMSTATE_UINT32(rx_mode, VMXNET3State),
        VMSTATE_UINT32(mcast_list_len, VMXNET3State),
        VMSTATE_UINT32(mcast_list_buff_size, VMXNET3State),
        VMSTATE_UINT32_ARRAY(vlan_table, VMXNET3State, VMXNET3_VFT_SIZE),
        VMSTATE_UINT32(mtu, VMXNET3State),
        VMSTATE_UINT16(max_rx_frags, VMXNET3State),
        VMSTATE_UINT32(max_tx_frags, VMXNET3State),
        VMSTATE_UINT8(event_int_idx, VMXNET3State),
        VMSTATE_BOOL(auto_int_masking, VMXNET3State),
        VMSTATE_UINT8(txq_num, VMXNET3State),
        VMSTATE_UINT8(rxq_num, VMXNET3State),
        VMSTATE_UINT32(device_active, VMXNET3State),
        VMSTATE_UINT32(last_command, VMXNET3State),
        VMSTATE_UINT32(link_status_and_speed, VMXNET3State),
        VMSTATE_UINT32(temp_mac, VMXNET3State),
        VMSTATE_UINT64(drv_shmem, VMXNET3State),
        VMSTATE_UINT64(temp_shared_guest_driver_memory, VMXNET3State),

        VMSTATE_ARRAY(txq_descr, VMXNET3State,
                      VMXNET3_DEVICE_MAX_TX_QUEUES, 0, txq_descr_info,
                      Vmxnet3TxqDescr),
        VMSTATE_ARRAY(rxq_descr, VMXNET3State,
                      VMXNET3_DEVICE_MAX_RX_QUEUES, 0, rxq_descr_info,
                      Vmxnet3RxqDescr),
        VMSTATE_ARRAY(interrupt_states, VMXNET3State, VMXNET3_MAX_INTRS,
                      0, int_state_info, Vmxnet3IntState),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_vmxnet3_mcast_list,
            .needed = vmxnet3_mc_list_needed
        },
        {
            /* empty element. */
        }
    }
};

static void
vmxnet3_write_config(PCIDevice *pci_dev, uint32_t addr, uint32_t val, int len)
{
    pci_default_write_config(pci_dev, addr, val, len);
    msix_write_config(pci_dev, addr, val, len);
    msi_write_config(pci_dev, addr, val, len);
}

static Property vmxnet3_properties[] = {
    DEFINE_NIC_PROPERTIES(VMXNET3State, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void vmxnet3_class_init(ObjectClass *class, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(class);
    PCIDeviceClass *c = PCI_DEVICE_CLASS(class);

    c->init = vmxnet3_pci_init;
    c->exit = vmxnet3_pci_uninit;
    c->vendor_id = PCI_VENDOR_ID_VMWARE;
    c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
    c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION;
    c->class_id = PCI_CLASS_NETWORK_ETHERNET;
    c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
    c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
    c->config_write = vmxnet3_write_config;
    dc->desc = "VMWare Paravirtualized Ethernet v3";
    dc->reset = vmxnet3_qdev_reset;
    dc->vmsd = &vmstate_vmxnet3;
    dc->props = vmxnet3_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo vmxnet3_info = {
    .name = TYPE_VMXNET3,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VMXNET3State),
    .class_init = vmxnet3_class_init,
};

static void vmxnet3_register_types(void)
{
    VMW_CBPRN("vmxnet3_register_types called...");
    type_register_static(&vmxnet3_info);
}

type_init(vmxnet3_register_types)