xref: /openbmc/qemu/hw/net/vmxnet3.c (revision 469b046e)
/*
 * QEMU VMWARE VMXNET3 paravirtual NIC
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 *
 */

#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
#include "net/tap.h"
#include "net/checksum.h"
#include "sysemu/sysemu.h"
#include "qemu-common.h"
#include "qemu/bswap.h"
#include "hw/pci/msix.h"
#include "hw/pci/msi.h"

#include "vmxnet3.h"
#include "vmxnet_debug.h"
#include "vmware_utils.h"
#include "vmxnet_tx_pkt.h"
#include "vmxnet_rx_pkt.h"

#define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1
#define VMXNET3_MSIX_BAR_SIZE 0x2000

#define VMXNET3_BAR0_IDX      (0)
#define VMXNET3_BAR1_IDX      (1)
#define VMXNET3_MSIX_BAR_IDX  (2)

#define VMXNET3_OFF_MSIX_TABLE (0x000)
#define VMXNET3_OFF_MSIX_PBA   (0x800)

/* Link speed in Mbps; the register value is the speed shifted left by 16 */
#define VMXNET3_LINK_SPEED      (1000 << 16)

/* Link status: 1 - up, 0 - down. */
#define VMXNET3_LINK_STATUS_UP  0x1

/* Least significant bit should be set for revision and version */
#define VMXNET3_DEVICE_VERSION    0x1
#define VMXNET3_DEVICE_REVISION   0x1

/* Number of interrupt vectors for non-MSIx modes */
#define VMXNET3_MAX_NMSIX_INTRS   (1)

/* Macros for ring descriptor access */
#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
    (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))

#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))

#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_READ_RX_QUEUE_DESCR8(dpa, field) \
    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))

#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))

#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))

/* Macros for guest driver shared area access */
#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
    (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
    (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
    (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))

#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
    (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
    (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))

#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
    (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))

#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
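/*
 * Note: VMXNET_FLAG_IS_SET checks that *all* bits of 'flag' are set, not
 * just any of them, which matters for multi-bit masks. A worked example
 * (illustrative only, not used by the device model):
 *
 *   VMXNET_FLAG_IS_SET(0x3, 0x1) -> true  (bit 0 is set)
 *   VMXNET_FLAG_IS_SET(0x1, 0x3) -> false (bit 1 is missing)
 */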

#define TYPE_VMXNET3 "vmxnet3"
#define VMXNET3(obj) OBJECT_CHECK(VMXNET3State, (obj), TYPE_VMXNET3)

/* Cyclic ring abstraction */
typedef struct {
    hwaddr pa;
    size_t size;
    size_t cell_size;
    size_t next;
    uint8_t gen;
} Vmxnet3Ring;

static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
                                     hwaddr pa,
                                     size_t size,
                                     size_t cell_size,
                                     bool zero_region)
{
    ring->pa = pa;
    ring->size = size;
    ring->cell_size = cell_size;
    ring->gen = VMXNET3_INIT_GEN;
    ring->next = 0;

    if (zero_region) {
        vmw_shmem_set(pa, 0, size * cell_size);
    }
}

#define VMXNET3_RING_DUMP(macro, ring_name, ridx, r)                         \
    macro("%s#%d: base %" PRIx64 " size %zu cell_size %zu gen %d next %zu",  \
          (ring_name), (ridx),                                               \
          (r)->pa, (r)->size, (r)->cell_size, (r)->gen, (r)->next)

static inline void vmxnet3_ring_inc(Vmxnet3Ring *ring)
{
    if (++ring->next >= ring->size) {
        ring->next = 0;
        ring->gen ^= 1;
    }
}

static inline void vmxnet3_ring_dec(Vmxnet3Ring *ring)
{
    if (ring->next-- == 0) {
        ring->next = ring->size - 1;
        ring->gen ^= 1;
    }
}

static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
{
    return ring->pa + ring->next * ring->cell_size;
}

static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
{
    vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}

static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
{
    vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}

static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
{
    return ring->next;
}

static inline uint8_t vmxnet3_ring_curr_gen(Vmxnet3Ring *ring)
{
    return ring->gen;
}
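
/*
 * Illustrative sketch of how the helpers above are combined by the TX/RX
 * paths below (not called anywhere): a descriptor belongs to the device
 * only while its generation field matches the ring's current generation,
 * and vmxnet3_ring_inc() flips the generation each time 'next' wraps.
 *
 *   for (;;) {
 *       vmxnet3_ring_read_curr_cell(ring, &descr);
 *       if (descr.gen != vmxnet3_ring_curr_gen(ring)) {
 *           break;                  // still owned by the guest
 *       }
 *       smp_rmb();                  // read payload only after gen check
 *       vmxnet3_ring_read_curr_cell(ring, &descr);
 *       process(&descr);            // hypothetical per-cell handler
 *       vmxnet3_ring_inc(ring);
 *   }
 */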

/* Debug trace-related functions */
static inline void
vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr)
{
    VMW_PKPRN("TX DESCR: "
              "addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
              "dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, "
              "eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d",
              le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd,
              descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om,
              descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci);
}

static inline void
vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr)
{
    VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, "
              "csum_start: %d, csum_offset: %d",
              vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size,
              vhdr->csum_start, vhdr->csum_offset);
}

static inline void
vmxnet3_dump_rx_descr(struct Vmxnet3_RxDesc *descr)
{
    VMW_PKPRN("RX DESCR: addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
              "dtype: %d, ext1: %d, btype: %d",
              le64_to_cpu(descr->addr), descr->len, descr->gen,
              descr->rsvd, descr->dtype, descr->ext1, descr->btype);
}

/* Device state and helper functions */
#define VMXNET3_RX_RINGS_PER_QUEUE (2)

typedef struct {
    Vmxnet3Ring tx_ring;
    Vmxnet3Ring comp_ring;

    uint8_t intr_idx;
    hwaddr tx_stats_pa;
    struct UPT1_TxStats txq_stats;
} Vmxnet3TxqDescr;

typedef struct {
    Vmxnet3Ring rx_ring[VMXNET3_RX_RINGS_PER_QUEUE];
    Vmxnet3Ring comp_ring;
    uint8_t intr_idx;
    hwaddr rx_stats_pa;
    struct UPT1_RxStats rxq_stats;
} Vmxnet3RxqDescr;

typedef struct {
    bool is_masked;
    bool is_pending;
    bool is_asserted;
} Vmxnet3IntState;

typedef struct {
    PCIDevice parent_obj;
    NICState *nic;
    NICConf conf;
    MemoryRegion bar0;
    MemoryRegion bar1;
    MemoryRegion msix_bar;

    Vmxnet3RxqDescr rxq_descr[VMXNET3_DEVICE_MAX_RX_QUEUES];
    Vmxnet3TxqDescr txq_descr[VMXNET3_DEVICE_MAX_TX_QUEUES];

    /* Whether MSI-X support was installed successfully */
    bool msix_used;
    /* Whether MSI support was installed successfully */
    bool msi_used;
    hwaddr drv_shmem;
    hwaddr temp_shared_guest_driver_memory;

    uint8_t txq_num;

    /*
     * Whether the RX packet being indicated has to be split into
     * head and body chunks from different RX rings
     */
    bool rx_packets_compound;

    bool rx_vlan_stripping;
    bool lro_supported;

    uint8_t rxq_num;

    /* Network MTU */
    uint32_t mtu;

    /* Maximum number of fragments for indicated TX packets */
    uint32_t max_tx_frags;

    /* Maximum number of fragments for indicated RX packets */
    uint16_t max_rx_frags;

    /* Index for events interrupt */
    uint8_t event_int_idx;

    /* Whether automatic interrupt masking is enabled */
    bool auto_int_masking;

    bool peer_has_vhdr;

    /* TX packets to QEMU interface */
    struct VmxnetTxPkt *tx_pkt;
    uint32_t offload_mode;
    uint32_t cso_or_gso_size;
    uint16_t tci;
    bool needs_vlan;

    struct VmxnetRxPkt *rx_pkt;

    bool tx_sop;
    bool skip_current_tx_pkt;

    uint32_t device_active;
    uint32_t last_command;

    uint32_t link_status_and_speed;

    Vmxnet3IntState interrupt_states[VMXNET3_MAX_INTRS];

    uint32_t temp_mac;   /* To store the low part first */

    MACAddr perm_mac;
    uint32_t vlan_table[VMXNET3_VFT_SIZE];
    uint32_t rx_mode;
    MACAddr *mcast_list;
    uint32_t mcast_list_len;
    uint32_t mcast_list_buff_size; /* needed for live migration. */
} VMXNET3State;

/* Interrupt management */

/*
 * This function returns whether the interrupt line is asserted on exit.
 * The result depends on the interrupt type: an INTx line stays asserted
 * until explicitly deasserted, while an MSI(X) line is deasserted
 * automatically due to the notification semantics of MSI(X) interrupts
 */
static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msix_used && msix_enabled(d)) {
        VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx);
        msix_notify(d, int_idx);
        return false;
    }
    if (s->msi_used && msi_enabled(d)) {
        VMW_IRPRN("Sending MSI notification for vector %u", int_idx);
        msi_notify(d, int_idx);
        return false;
    }

    VMW_IRPRN("Asserting line for interrupt %u", int_idx);
    pci_irq_assert(d);
    return true;
}

static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx)
{
    PCIDevice *d = PCI_DEVICE(s);

    /*
     * This function should never be called for MSI(X) interrupts
     * because deassertion is never required for message-signalled
     * interrupts
     */
    assert(!s->msix_used || !msix_enabled(d));
    assert(!s->msi_used || !msi_enabled(d));

    VMW_IRPRN("Deasserting line for interrupt %u", lidx);
    pci_irq_deassert(d);
}

static void vmxnet3_update_interrupt_line_state(VMXNET3State *s, int lidx)
{
    if (!s->interrupt_states[lidx].is_pending &&
       s->interrupt_states[lidx].is_asserted) {
        VMW_IRPRN("New interrupt line state for index %d is DOWN", lidx);
        _vmxnet3_deassert_interrupt_line(s, lidx);
        s->interrupt_states[lidx].is_asserted = false;
        return;
    }

    if (s->interrupt_states[lidx].is_pending &&
       !s->interrupt_states[lidx].is_masked &&
       !s->interrupt_states[lidx].is_asserted) {
        VMW_IRPRN("New interrupt line state for index %d is UP", lidx);
        s->interrupt_states[lidx].is_asserted =
            _vmxnet3_assert_interrupt_line(s, lidx);
        s->interrupt_states[lidx].is_pending = false;
        return;
    }
}
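
/*
 * Summary of the per-vector state machine implemented above (derived
 * from the code, shown here only as an aid to the reader):
 *
 *   pending masked asserted | action taken
 *   ------- ------ -------- | --------------------------------------
 *      0       -      1     | deassert the line, asserted := 0
 *      1       0      0     | assert line or fire MSI/MSI-X,
 *                           |   asserted := result, pending := 0
 *   any other combination   | no change
 *
 * For MSI/MSI-X, _vmxnet3_assert_interrupt_line() returns false, so
 * 'is_asserted' stays clear and no deassertion is ever needed.
 */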

static void vmxnet3_trigger_interrupt(VMXNET3State *s, int lidx)
{
    PCIDevice *d = PCI_DEVICE(s);
    s->interrupt_states[lidx].is_pending = true;
    vmxnet3_update_interrupt_line_state(s, lidx);

    if (s->msix_used && msix_enabled(d) && s->auto_int_masking) {
        goto do_automask;
    }

    if (s->msi_used && msi_enabled(d) && s->auto_int_masking) {
        goto do_automask;
    }

    return;

do_automask:
    s->interrupt_states[lidx].is_masked = true;
    vmxnet3_update_interrupt_line_state(s, lidx);
}

static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx)
{
    return s->interrupt_states[lidx].is_asserted;
}

static void vmxnet3_clear_interrupt(VMXNET3State *s, int int_idx)
{
    s->interrupt_states[int_idx].is_pending = false;
    if (s->auto_int_masking) {
        s->interrupt_states[int_idx].is_masked = true;
    }
    vmxnet3_update_interrupt_line_state(s, int_idx);
}

static void
vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
{
    s->interrupt_states[lidx].is_masked = is_masked;
    vmxnet3_update_interrupt_line_state(s, lidx);
}

static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
{
    return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
}

#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
#define VMXNET3_MAKE_BYTE(byte_num, val) \
    (((uint32_t)((val) & 0xFF)) << (byte_num)*8)
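/*
 * Worked example (illustrative only): for x = 0x12345678,
 *   VMXNET3_GET_BYTE(x, 0) == 0x78 and VMXNET3_GET_BYTE(x, 2) == 0x34;
 *   VMXNET3_MAKE_BYTE(2, 0x34) == 0x00340000 is the inverse operation.
 */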

static void vmxnet3_set_variable_mac(VMXNET3State *s, uint32_t h, uint32_t l)
{
    s->conf.macaddr.a[0] = VMXNET3_GET_BYTE(l,  0);
    s->conf.macaddr.a[1] = VMXNET3_GET_BYTE(l,  1);
    s->conf.macaddr.a[2] = VMXNET3_GET_BYTE(l,  2);
    s->conf.macaddr.a[3] = VMXNET3_GET_BYTE(l,  3);
    s->conf.macaddr.a[4] = VMXNET3_GET_BYTE(h, 0);
    s->conf.macaddr.a[5] = VMXNET3_GET_BYTE(h, 1);

    VMW_CFPRN("Variable MAC: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static uint64_t vmxnet3_get_mac_low(MACAddr *addr)
{
    return VMXNET3_MAKE_BYTE(0, addr->a[0]) |
           VMXNET3_MAKE_BYTE(1, addr->a[1]) |
           VMXNET3_MAKE_BYTE(2, addr->a[2]) |
           VMXNET3_MAKE_BYTE(3, addr->a[3]);
}

static uint64_t vmxnet3_get_mac_high(MACAddr *addr)
{
    return VMXNET3_MAKE_BYTE(0, addr->a[4]) |
           VMXNET3_MAKE_BYTE(1, addr->a[5]);
}
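
/*
 * The MAC address is exposed to the guest as two 32-bit register values:
 * the low word carries bytes 0-3 and the high word bytes 4-5 (upper 16
 * bits unused). A sketch of how a guest would reassemble it, assuming
 * hypothetical register-read helpers:
 *
 *   uint32_t lo = read_reg(VMXNET3_REG_MACL);   // a[0]..a[3]
 *   uint32_t hi = read_reg(VMXNET3_REG_MACH);   // a[4]..a[5]
 *   uint8_t mac[6] = {
 *       lo & 0xFF, (lo >> 8) & 0xFF, (lo >> 16) & 0xFF, (lo >> 24) & 0xFF,
 *       hi & 0xFF, (hi >> 8) & 0xFF,
 *   };
 *
 * vmxnet3_set_variable_mac() above performs the mirror-image unpacking
 * when the guest writes the two registers back.
 */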

static void
vmxnet3_inc_tx_consumption_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->txq_descr[qidx].tx_ring);
}

static inline void
vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx)
{
    vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]);
}

static inline void
vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring);
}

static void
vmxnet3_inc_rx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_inc(&s->rxq_descr[qidx].comp_ring);
}

static void
vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
{
    vmxnet3_ring_dec(&s->rxq_descr[qidx].comp_ring);
}

static void vmxnet3_complete_packet(VMXNET3State *s, int qidx,
                                    uint32_t tx_ridx)
{
    struct Vmxnet3_TxCompDesc txcq_descr;

    VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);

    /* Do not leak stack garbage in fields we never set explicitly */
    memset(&txcq_descr, 0, sizeof(txcq_descr));
    txcq_descr.txdIdx = tx_ridx;
    txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);

    vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);

    /* Flush changes in TX descriptor before changing the counter value */
    smp_wmb();

    vmxnet3_inc_tx_completion_counter(s, qidx);
    vmxnet3_trigger_interrupt(s, s->txq_descr[qidx].intr_idx);
}

static bool
vmxnet3_setup_tx_offloads(VMXNET3State *s)
{
    switch (s->offload_mode) {
    case VMXNET3_OM_NONE:
        vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
        break;

    case VMXNET3_OM_CSUM:
        vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
        VMW_PKPRN("L4 CSO requested");
        break;

    case VMXNET3_OM_TSO:
        vmxnet_tx_pkt_build_vheader(s->tx_pkt, true, true,
            s->cso_or_gso_size);
        vmxnet_tx_pkt_update_ip_checksums(s->tx_pkt);
        VMW_PKPRN("GSO offload requested.");
        break;

    default:
        g_assert_not_reached();
        return false;
    }

    return true;
}
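
/*
 * The switch above maps the vmxnet3 TX offload mode onto a virtio-net
 * header. In terms of the vmxnet_tx_pkt_build_vheader() parameters
 * (meaning inferred from the calls above: tso_enable, csum_enable,
 * gso_size) the mapping is:
 *
 *   VMXNET3_OM_NONE -> (false, false, 0)                   no offload
 *   VMXNET3_OM_CSUM -> (false, true,  0)                   L4 checksum
 *   VMXNET3_OM_TSO  -> (true,  true,  s->cso_or_gso_size)  TSO/GSO
 *
 * For TSO the descriptor's 'msscof' field (cached in cso_or_gso_size)
 * carries the MSS; for plain checksum offload the same field is used by
 * the guest to convey the checksum-insertion offset instead.
 */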

static void
vmxnet3_tx_retrieve_metadata(VMXNET3State *s,
                             const struct Vmxnet3_TxDesc *txd)
{
    s->offload_mode = txd->om;
    s->cso_or_gso_size = txd->msscof;
    s->tci = txd->tci;
    s->needs_vlan = txd->ti;
}

typedef enum {
    VMXNET3_PKT_STATUS_OK,
    VMXNET3_PKT_STATUS_ERROR,
    VMXNET3_PKT_STATUS_DISCARD, /* only for tx */
    VMXNET3_PKT_STATUS_OUT_OF_BUF /* only for rx */
} Vmxnet3PktStatus;

static void
vmxnet3_on_tx_done_update_stats(VMXNET3State *s, int qidx,
    Vmxnet3PktStatus status)
{
    size_t tot_len = vmxnet_tx_pkt_get_total_len(s->tx_pkt);
    struct UPT1_TxStats *stats = &s->txq_descr[qidx].txq_stats;

    switch (status) {
    case VMXNET3_PKT_STATUS_OK:
        switch (vmxnet_tx_pkt_get_packet_type(s->tx_pkt)) {
        case ETH_PKT_BCAST:
            stats->bcastPktsTxOK++;
            stats->bcastBytesTxOK += tot_len;
            break;
        case ETH_PKT_MCAST:
            stats->mcastPktsTxOK++;
            stats->mcastBytesTxOK += tot_len;
            break;
        case ETH_PKT_UCAST:
            stats->ucastPktsTxOK++;
            stats->ucastBytesTxOK += tot_len;
            break;
        default:
            g_assert_not_reached();
        }

        if (s->offload_mode == VMXNET3_OM_TSO) {
            /*
             * According to VMWARE headers this statistic is a number
             * of packets after segmentation but since we don't have
             * this information in QEMU model, the best we can do is to
             * provide number of non-segmented packets
             */
            stats->TSOPktsTxOK++;
            stats->TSOBytesTxOK += tot_len;
        }
        break;

    case VMXNET3_PKT_STATUS_DISCARD:
        stats->pktsTxDiscard++;
        break;

    case VMXNET3_PKT_STATUS_ERROR:
        stats->pktsTxError++;
        break;

    default:
        g_assert_not_reached();
    }
}

static void
vmxnet3_on_rx_done_update_stats(VMXNET3State *s,
                                int qidx,
                                Vmxnet3PktStatus status)
{
    struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats;
    size_t tot_len = vmxnet_rx_pkt_get_total_len(s->rx_pkt);

    switch (status) {
    case VMXNET3_PKT_STATUS_OUT_OF_BUF:
        stats->pktsRxOutOfBuf++;
        break;

    case VMXNET3_PKT_STATUS_ERROR:
        stats->pktsRxError++;
        break;
    case VMXNET3_PKT_STATUS_OK:
        switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
        case ETH_PKT_BCAST:
            stats->bcastPktsRxOK++;
            stats->bcastBytesRxOK += tot_len;
            break;
        case ETH_PKT_MCAST:
            stats->mcastPktsRxOK++;
            stats->mcastBytesRxOK += tot_len;
            break;
        case ETH_PKT_UCAST:
            stats->ucastPktsRxOK++;
            stats->ucastBytesRxOK += tot_len;
            break;
        default:
            g_assert_not_reached();
        }

        if (tot_len > s->mtu) {
            stats->LROPktsRxOK++;
            stats->LROBytesRxOK += tot_len;
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static inline bool
vmxnet3_pop_next_tx_descr(VMXNET3State *s,
                          int qidx,
                          struct Vmxnet3_TxDesc *txd,
                          uint32_t *descr_idx)
{
    Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;

    vmxnet3_ring_read_curr_cell(ring, txd);
    if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_ring_read_curr_cell(ring, txd);
        VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
        *descr_idx = vmxnet3_ring_curr_cell_idx(ring);
        vmxnet3_inc_tx_consumption_counter(s, qidx);
        return true;
    }

    return false;
}
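
/*
 * The read/check/re-read dance above is the guest-visible ownership
 * protocol: the guest writes the 'gen' field last, so a matching
 * generation guarantees the rest of the descriptor is complete only
 * after a read barrier and a second read. A condensed sketch of the
 * pattern (illustrative only):
 *
 *   read descriptor
 *   if (descr.gen != ring gen)   -> not ours yet, stop
 *   smp_rmb()                    -> order gen check before payload read
 *   re-read descriptor           -> now guaranteed fully written
 *   consume + advance ring
 */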

static bool
vmxnet3_send_packet(VMXNET3State *s, uint32_t qidx)
{
    Vmxnet3PktStatus status = VMXNET3_PKT_STATUS_OK;

    if (!vmxnet3_setup_tx_offloads(s)) {
        status = VMXNET3_PKT_STATUS_ERROR;
        goto func_exit;
    }

    /* debug prints */
    vmxnet3_dump_virt_hdr(vmxnet_tx_pkt_get_vhdr(s->tx_pkt));
    vmxnet_tx_pkt_dump(s->tx_pkt);

    if (!vmxnet_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) {
        status = VMXNET3_PKT_STATUS_DISCARD;
        goto func_exit;
    }

func_exit:
    vmxnet3_on_tx_done_update_stats(s, qidx, status);
    return (status == VMXNET3_PKT_STATUS_OK);
}

static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
{
    struct Vmxnet3_TxDesc txd;
    uint32_t txd_idx;
    uint32_t data_len;
    hwaddr data_pa;

    for (;;) {
        if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) {
            break;
        }

        vmxnet3_dump_tx_descr(&txd);

        if (!s->skip_current_tx_pkt) {
            data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
            data_pa = le64_to_cpu(txd.addr);

            if (!vmxnet_tx_pkt_add_raw_fragment(s->tx_pkt,
                                                data_pa,
                                                data_len)) {
                s->skip_current_tx_pkt = true;
            }
        }

        if (s->tx_sop) {
            vmxnet3_tx_retrieve_metadata(s, &txd);
            s->tx_sop = false;
        }

        if (txd.eop) {
            if (!s->skip_current_tx_pkt) {
                vmxnet_tx_pkt_parse(s->tx_pkt);

                if (s->needs_vlan) {
                    vmxnet_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci);
                }

                vmxnet3_send_packet(s, qidx);
            } else {
                vmxnet3_on_tx_done_update_stats(s, qidx,
                                                VMXNET3_PKT_STATUS_ERROR);
            }

            vmxnet3_complete_packet(s, qidx, txd_idx);
            s->tx_sop = true;
            s->skip_current_tx_pkt = false;
            vmxnet_tx_pkt_reset(s->tx_pkt);
        }
    }
}

static inline void
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
                           struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
{
    Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
    *didx = vmxnet3_ring_curr_cell_idx(ring);
    vmxnet3_ring_read_curr_cell(ring, dbuf);
}

static inline uint8_t
vmxnet3_get_rx_ring_gen(VMXNET3State *s, int qidx, int ridx)
{
    return s->rxq_descr[qidx].rx_ring[ridx].gen;
}

static inline hwaddr
vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
{
    uint8_t ring_gen;
    struct Vmxnet3_RxCompDesc rxcd;

    hwaddr daddr =
        vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);

    cpu_physical_memory_read(daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
    ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);

    if (rxcd.gen != ring_gen) {
        *descr_gen = ring_gen;
        vmxnet3_inc_rx_completion_counter(s, qidx);
        return daddr;
    }

    return 0;
}

static inline void
vmxnet3_revert_rxc_descr(VMXNET3State *s, int qidx)
{
    vmxnet3_dec_rx_completion_counter(s, qidx);
}

#define RXQ_IDX      (0)
#define RX_HEAD_BODY_RING (0)
#define RX_BODY_ONLY_RING (1)
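
/*
 * Each RX queue has two rings: ring 0 supplies head (or head+body)
 * buffers, ring 1 supplies body-only buffers. For guests that use
 * compound RX buffers (see s->rx_packets_compound) a large packet is
 * scattered as one head descriptor from ring 0 followed by body
 * descriptors drawn from either ring, for example:
 *
 *   frag 0 -> ring 0, btype HEAD   (vmxnet3_get_next_head_rx_descr)
 *   frag 1 -> ring 0, btype BODY   (vmxnet3_get_next_body_rx_descr)
 *   frag 2 -> ring 1, btype BODY   (after ring 0 runs out of bodies)
 *
 * Non-compound guests receive every fragment via head descriptors from
 * ring 0 only.
 */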

static bool
vmxnet3_get_next_head_rx_descr(VMXNET3State *s,
                               struct Vmxnet3_RxDesc *descr_buf,
                               uint32_t *descr_idx,
                               uint32_t *ridx)
{
    for (;;) {
        uint32_t ring_gen;
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
                                   descr_buf, descr_idx);

        /* If no more free descriptors - return */
        ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING);
        if (descr_buf->gen != ring_gen) {
            return false;
        }

        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
                                   descr_buf, descr_idx);

        /* Mark current descriptor as used/skipped */
        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);

        /* If this is what we are looking for - return */
        if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) {
            *ridx = RX_HEAD_BODY_RING;
            return true;
        }
    }
}

static bool
vmxnet3_get_next_body_rx_descr(VMXNET3State *s,
                               struct Vmxnet3_RxDesc *d,
                               uint32_t *didx,
                               uint32_t *ridx)
{
    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);

    /* Try to find corresponding descriptor in head/body ring */
    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
        if (d->btype == VMXNET3_RXD_BTYPE_BODY) {
            vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
            *ridx = RX_HEAD_BODY_RING;
            return true;
        }
    }

    /*
     * If there are no free descriptors on the head/body ring, or the
     * next free descriptor is a head descriptor, switch to the body
     * only ring
     */
    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);

    /* If no more free descriptors - return */
    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) {
        /* Only read after generation field verification */
        smp_rmb();
        /* Re-read to be sure we got the latest version */
        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
        assert(d->btype == VMXNET3_RXD_BTYPE_BODY);
        *ridx = RX_BODY_ONLY_RING;
        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING);
        return true;
    }

    return false;
}

static inline bool
vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
                          struct Vmxnet3_RxDesc *descr_buf,
                          uint32_t *descr_idx,
                          uint32_t *ridx)
{
    if (is_head || !s->rx_packets_compound) {
        return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx);
    } else {
        return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx);
    }
}

static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
    struct Vmxnet3_RxCompDesc *rxcd)
{
    int csum_ok, is_gso;
    bool isip4, isip6, istcp, isudp;
    struct virtio_net_hdr *vhdr;
    uint8_t offload_type;

    if (vmxnet_rx_pkt_is_vlan_stripped(pkt)) {
        rxcd->ts = 1;
        rxcd->tci = vmxnet_rx_pkt_get_vlan_tag(pkt);
    }

    if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
        goto nocsum;
    }

    vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
    /*
     * The checksum is considered valid either when the lower layer
     * says so, or when the lower layer requests checksum offload,
     * which means the packet was produced/bridged locally and has not
     * travelled over a real network since the last checksum
     * calculation or production
     */
    csum_ok = VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_DATA_VALID) ||
              VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM);

    offload_type = vhdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    is_gso = (offload_type != VIRTIO_NET_HDR_GSO_NONE) ? 1 : 0;

    if (!csum_ok && !is_gso) {
        goto nocsum;
    }

    vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
    if ((!istcp && !isudp) || (!isip4 && !isip6)) {
        goto nocsum;
    }

    rxcd->cnc = 0;
    rxcd->v4 = isip4 ? 1 : 0;
    rxcd->v6 = isip6 ? 1 : 0;
    rxcd->tcp = istcp ? 1 : 0;
    rxcd->udp = isudp ? 1 : 0;
    rxcd->fcs = rxcd->tuc = rxcd->ipc = 1;
    return;

nocsum:
    rxcd->cnc = 1;
    return;
}

static void
vmxnet3_physical_memory_writev(const struct iovec *iov,
                               size_t start_iov_off,
                               hwaddr target_addr,
                               size_t bytes_to_copy)
{
    size_t curr_off = 0;
    size_t copied = 0;

    while (bytes_to_copy) {
        if (start_iov_off < (curr_off + iov->iov_len)) {
            size_t chunk_len =
                MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy);

            cpu_physical_memory_write(target_addr + copied,
                                      iov->iov_base + start_iov_off - curr_off,
                                      chunk_len);

            copied += chunk_len;
            start_iov_off += chunk_len;
            curr_off = start_iov_off;
            bytes_to_copy -= chunk_len;
        } else {
            curr_off += iov->iov_len;
        }
        iov++;
    }
}
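
/*
 * vmxnet3_physical_memory_writev() copies 'bytes_to_copy' bytes of an
 * iovec-backed packet, starting 'start_iov_off' bytes into the packet,
 * to contiguous guest memory. A hypothetical worked example: with
 * iov = { 8-byte chunk, 8-byte chunk }, start_iov_off = 10 and
 * bytes_to_copy = 4, the first chunk is skipped entirely, the copy
 * begins 2 bytes into the second chunk, and packet bytes 10..13 land at
 * target_addr..target_addr+3.
 */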

static bool
vmxnet3_indicate_packet(VMXNET3State *s)
{
    struct Vmxnet3_RxDesc rxd;
    bool is_head = true;
    uint32_t rxd_idx;
    uint32_t rx_ridx = 0;

    struct Vmxnet3_RxCompDesc rxcd;
    uint32_t new_rxcd_gen = VMXNET3_INIT_GEN;
    hwaddr new_rxcd_pa = 0;
    hwaddr ready_rxcd_pa = 0;
    struct iovec *data = vmxnet_rx_pkt_get_iovec(s->rx_pkt);
    size_t bytes_copied = 0;
    size_t bytes_left = vmxnet_rx_pkt_get_total_len(s->rx_pkt);
    uint16_t num_frags = 0;
    size_t chunk_size;

    vmxnet_rx_pkt_dump(s->rx_pkt);

    while (bytes_left > 0) {

        /* cannot add more frags to packet */
        if (num_frags == s->max_rx_frags) {
            break;
        }

        new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen);
        if (!new_rxcd_pa) {
            break;
        }

        if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) {
            break;
        }

        chunk_size = MIN(bytes_left, rxd.len);
        vmxnet3_physical_memory_writev(data, bytes_copied,
                                       le64_to_cpu(rxd.addr), chunk_size);
        bytes_copied += chunk_size;
        bytes_left -= chunk_size;

        vmxnet3_dump_rx_descr(&rxd);

        if (ready_rxcd_pa != 0) {
            cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
        }

        memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
        rxcd.rxdIdx = rxd_idx;
        rxcd.len = chunk_size;
        rxcd.sop = is_head;
        rxcd.gen = new_rxcd_gen;
        rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num;

        if (bytes_left == 0) {
            vmxnet3_rx_update_descr(s->rx_pkt, &rxcd);
        }

        VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu "
                  "sop %d csum_correct %lu",
                  (unsigned long) rx_ridx,
                  (unsigned long) rxcd.rxdIdx,
                  (unsigned long) rxcd.len,
                  (int) rxcd.sop,
                  (unsigned long) rxcd.tuc);

        is_head = false;
        ready_rxcd_pa = new_rxcd_pa;
        new_rxcd_pa = 0;
        num_frags++;
    }

    if (ready_rxcd_pa != 0) {
        rxcd.eop = 1;
        rxcd.err = (bytes_left != 0);
        cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));

        /* Flush RX descriptor changes */
        smp_wmb();
    }

    if (new_rxcd_pa != 0) {
        vmxnet3_revert_rxc_descr(s, RXQ_IDX);
    }

    vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx);

    if (bytes_left == 0) {
        vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK);
        return true;
    } else if (num_frags == s->max_rx_frags) {
        vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR);
        return false;
    } else {
        vmxnet3_on_rx_done_update_stats(s, RXQ_IDX,
                                        VMXNET3_PKT_STATUS_OUT_OF_BUF);
        return false;
    }
}

static void
vmxnet3_io_bar0_write(void *opaque, hwaddr addr,
                      uint64_t val, unsigned size)
{
    VMXNET3State *s = opaque;

    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD,
                        VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) {
        int tx_queue_idx =
            VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD,
                                     VMXNET3_REG_ALIGN);
        assert(tx_queue_idx < s->txq_num);
        vmxnet3_process_tx_queue(s, tx_queue_idx);
        return;
    }

    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
                        VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) {
        int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR,
                                         VMXNET3_REG_ALIGN);

        VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val);

        vmxnet3_on_interrupt_mask_changed(s, l, val);
        return;
    }

    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD,
                        VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) ||
       VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2,
                        VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) {
        return;
    }

    VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d",
              (uint64_t) addr, val, size);
}

static uint64_t
vmxnet3_io_bar0_read(void *opaque, hwaddr addr, unsigned size)
{
    if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
                        VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) {
        g_assert_not_reached();
    }

    VMW_CBPRN("BAR0 unknown read [%" PRIx64 "], size %d", addr, size);
    return 0;
}

static void vmxnet3_reset_interrupt_states(VMXNET3State *s)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(s->interrupt_states); i++) {
        s->interrupt_states[i].is_asserted = false;
        s->interrupt_states[i].is_pending = false;
        s->interrupt_states[i].is_masked = true;
    }
}

static void vmxnet3_reset_mac(VMXNET3State *s)
{
    memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
    VMW_CFPRN("MAC address set to: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
}

static void vmxnet3_deactivate_device(VMXNET3State *s)
{
    VMW_CBPRN("Deactivating vmxnet3...");
    s->device_active = false;
}

static void vmxnet3_reset(VMXNET3State *s)
{
    VMW_CBPRN("Resetting vmxnet3...");

    vmxnet3_deactivate_device(s);
    vmxnet3_reset_interrupt_states(s);
    vmxnet_tx_pkt_reset(s->tx_pkt);
    s->drv_shmem = 0;
    s->tx_sop = true;
    s->skip_current_tx_pkt = false;
}

static void vmxnet3_update_rx_mode(VMXNET3State *s)
{
    s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
                                           devRead.rxFilterConf.rxMode);
    VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
}

static void vmxnet3_update_vlan_filters(VMXNET3State *s)
{
    int i;

    /* Copy configuration from shared memory */
    VMXNET3_READ_DRV_SHARED(s->drv_shmem,
                            devRead.rxFilterConf.vfTable,
                            s->vlan_table,
                            sizeof(s->vlan_table));

    /* Invert byte order when needed */
    for (i = 0; i < ARRAY_SIZE(s->vlan_table); i++) {
        s->vlan_table[i] = le32_to_cpu(s->vlan_table[i]);
    }

    /* Dump configuration for debugging purposes */
    VMW_CFPRN("Configured VLANs:");
    for (i = 0; i < sizeof(s->vlan_table) * 8; i++) {
        if (VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, i)) {
            VMW_CFPRN("\tVLAN %d is present", i);
        }
    }
}

static void vmxnet3_update_mcast_filters(VMXNET3State *s)
{
    uint16_t list_bytes =
        VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
                                  devRead.rxFilterConf.mfTableLen);

    s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);

    s->mcast_list = g_realloc(s->mcast_list, list_bytes);
    if (s->mcast_list == NULL) {
        if (s->mcast_list_len == 0) {
            VMW_CFPRN("Current multicast list is empty");
        } else {
            VMW_ERPRN("Failed to allocate multicast list of %d elements",
                      s->mcast_list_len);
        }
        s->mcast_list_len = 0;
    } else {
        int i;
        hwaddr mcast_list_pa =
            VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
                                      devRead.rxFilterConf.mfTablePA);

        cpu_physical_memory_read(mcast_list_pa, s->mcast_list, list_bytes);
        VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
        for (i = 0; i < s->mcast_list_len; i++) {
            VMW_CFPRN("\t" VMXNET_MF, VMXNET_MA(s->mcast_list[i].a));
        }
    }
}

static void vmxnet3_setup_rx_filtering(VMXNET3State *s)
{
    vmxnet3_update_rx_mode(s);
    vmxnet3_update_vlan_filters(s);
    vmxnet3_update_mcast_filters(s);
}

static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
{
    uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2);
    VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode);
    return interrupt_mode;
}

static void vmxnet3_fill_stats(VMXNET3State *s)
{
    int i;
    for (i = 0; i < s->txq_num; i++) {
        cpu_physical_memory_write(s->txq_descr[i].tx_stats_pa,
                                  &s->txq_descr[i].txq_stats,
                                  sizeof(s->txq_descr[i].txq_stats));
    }

    for (i = 0; i < s->rxq_num; i++) {
        cpu_physical_memory_write(s->rxq_descr[i].rx_stats_pa,
                                  &s->rxq_descr[i].rxq_stats,
                                  sizeof(s->rxq_descr[i].rxq_stats));
    }
}

static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
{
    struct Vmxnet3_GOSInfo gos;

    VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
                            &gos, sizeof(gos));
    s->rx_packets_compound =
        (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;

    VMW_CFPRN("Guest type specifics: RXCOMPOUND: %d", s->rx_packets_compound);
}

static void
vmxnet3_dump_conf_descr(const char *name,
                        struct Vmxnet3_VariableLenConfDesc *pm_descr)
{
    VMW_CFPRN("%s descriptor dump: Version %u, Length %u",
              name, pm_descr->confVer, pm_descr->confLen);
}

static void vmxnet3_update_pm_state(VMXNET3State *s)
{
    struct Vmxnet3_VariableLenConfDesc pm_descr;

    pm_descr.confLen =
        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
    pm_descr.confVer =
        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
    pm_descr.confPA =
        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);

    vmxnet3_dump_conf_descr("PM State", &pm_descr);
}

static void vmxnet3_update_features(VMXNET3State *s)
{
    uint32_t guest_features;
    int rxcso_supported;

    guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
                                               devRead.misc.uptFeatures);

    rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
    s->rx_vlan_stripping = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXVLAN);
    s->lro_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_LRO);

    VMW_CFPRN("Features configuration: LRO: %d, RXCSUM: %d, VLANSTRIP: %d",
              s->lro_supported, rxcso_supported,
              s->rx_vlan_stripping);
    if (s->peer_has_vhdr) {
        qemu_set_offload(qemu_get_queue(s->nic)->peer,
                         rxcso_supported,
                         s->lro_supported,
                         s->lro_supported,
                         0,
                         0);
    }
}

static bool vmxnet3_verify_intx(VMXNET3State *s, int intx)
{
    return s->msix_used || s->msi_used || (intx ==
           (pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1));
}

static void vmxnet3_validate_interrupt_idx(bool is_msix, int idx)
{
    int max_ints = is_msix ? VMXNET3_MAX_INTRS : VMXNET3_MAX_NMSIX_INTRS;
    if (idx >= max_ints) {
        hw_error("Bad interrupt index: %d\n", idx);
    }
}

static void vmxnet3_validate_interrupts(VMXNET3State *s)
{
    int i;

    VMW_CFPRN("Verifying event interrupt index (%d)", s->event_int_idx);
    vmxnet3_validate_interrupt_idx(s->msix_used, s->event_int_idx);

    for (i = 0; i < s->txq_num; i++) {
        int idx = s->txq_descr[i].intr_idx;
        VMW_CFPRN("Verifying TX queue %d interrupt index (%d)", i, idx);
        vmxnet3_validate_interrupt_idx(s->msix_used, idx);
    }

    for (i = 0; i < s->rxq_num; i++) {
        int idx = s->rxq_descr[i].intr_idx;
        VMW_CFPRN("Verifying RX queue %d interrupt index (%d)", i, idx);
        vmxnet3_validate_interrupt_idx(s->msix_used, idx);
    }
}

static void vmxnet3_validate_queues(VMXNET3State *s)
{
    /*
     * txq_num and rxq_num are the total numbers of queues configured
     * by the guest. These numbers must not exceed the corresponding
     * maximal values.
     */

    if (s->txq_num > VMXNET3_DEVICE_MAX_TX_QUEUES) {
        hw_error("Bad TX queues number: %d\n", s->txq_num);
    }

    if (s->rxq_num > VMXNET3_DEVICE_MAX_RX_QUEUES) {
        hw_error("Bad RX queues number: %d\n", s->rxq_num);
    }
}

static void vmxnet3_activate_device(VMXNET3State *s)
{
    int i;
    static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
    hwaddr qdescr_table_pa;
    uint64_t pa;
    uint32_t size;

    /* Verify configuration consistency */
    if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
        VMW_ERPRN("Device configuration received from driver is invalid");
        return;
    }

    vmxnet3_adjust_by_guest_type(s);
    vmxnet3_update_features(s);
    vmxnet3_update_pm_state(s);
    vmxnet3_setup_rx_filtering(s);
    /* Cache fields from shared memory */
    s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
    VMW_CFPRN("MTU is %u", s->mtu);

    s->max_rx_frags =
        VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);

    if (s->max_rx_frags == 0) {
        s->max_rx_frags = 1;
    }

    VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);

    s->event_int_idx =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
    assert(vmxnet3_verify_intx(s, s->event_int_idx));
    VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);

    s->auto_int_masking =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
    VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);

    s->txq_num =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
    s->rxq_num =
        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);

    VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
    vmxnet3_validate_queues(s);

    qdescr_table_pa =
        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
    VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);

    /*
     * Worst-case scenario is a packet that holds all TX rings space so
     * we calculate total size of all TX rings for max TX fragments number
     */
    s->max_tx_frags = 0;

    /* TX queues */
    for (i = 0; i < s->txq_num; i++) {
        hwaddr qdescr_pa =
            qdescr_table_pa + i * sizeof(struct Vmxnet3_TxQueueDesc);

        /* Read interrupt number for this TX queue */
        s->txq_descr[i].intr_idx =
            VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
        assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));

        VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);

        /* Read rings memory locations for TX queues */
        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);

        vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
                          sizeof(struct Vmxnet3_TxDesc), false);
        VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);

        s->max_tx_frags += size;

        /* TXC ring */
        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
        vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
                          sizeof(struct Vmxnet3_TxCompDesc), true);
        VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);

        s->txq_descr[i].tx_stats_pa =
            qdescr_pa + offsetof(struct Vmxnet3_TxQueueDesc, stats);

        memset(&s->txq_descr[i].txq_stats, 0,
               sizeof(s->txq_descr[i].txq_stats));

        /* Fill device-managed parameters for queues */
        VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
                                       ctrl.txThreshold,
                                       VMXNET3_DEF_TX_THRESHOLD);
    }

    /* Preallocate TX packet wrapper */
    VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);

    /* Read rings memory locations for RX queues */
    for (i = 0; i < s->rxq_num; i++) {
        int j;
        hwaddr qd_pa =
            qdescr_table_pa + s->txq_num * sizeof(struct Vmxnet3_TxQueueDesc) +
            i * sizeof(struct Vmxnet3_RxQueueDesc);

        /* Read interrupt number for this RX queue */
        s->rxq_descr[i].intr_idx =
            VMXNET3_READ_RX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
        assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));

        VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);

        /* Read rings memory locations */
        for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
            /* RX rings */
            pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
            size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
            vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
                              sizeof(struct Vmxnet3_RxDesc), false);
            VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
                      i, j, pa, size);
        }

        /* RXC ring */
        pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
        size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
        vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
                          sizeof(struct Vmxnet3_RxCompDesc), true);
        VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);

        s->rxq_descr[i].rx_stats_pa =
            qd_pa + offsetof(struct Vmxnet3_RxQueueDesc, stats);
        memset(&s->rxq_descr[i].rxq_stats, 0,
               sizeof(s->rxq_descr[i].rxq_stats));
    }

    vmxnet3_validate_interrupts(s);

    /* Make sure everything is in place before device activation */
    smp_wmb();

    vmxnet3_reset_mac(s);

    s->device_active = true;
}

static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd)
{
    s->last_command = cmd;

    switch (cmd) {
    case VMXNET3_CMD_GET_PERM_MAC_HI:
        VMW_CBPRN("Set: Get upper part of permanent MAC");
        break;

    case VMXNET3_CMD_GET_PERM_MAC_LO:
        VMW_CBPRN("Set: Get lower part of permanent MAC");
        break;

    case VMXNET3_CMD_GET_STATS:
        VMW_CBPRN("Set: Get device statistics");
        vmxnet3_fill_stats(s);
        break;

    case VMXNET3_CMD_ACTIVATE_DEV:
        VMW_CBPRN("Set: Activating vmxnet3 device");
        vmxnet3_activate_device(s);
        break;

    case VMXNET3_CMD_UPDATE_RX_MODE:
        VMW_CBPRN("Set: Update rx mode");
        vmxnet3_update_rx_mode(s);
        break;

    case VMXNET3_CMD_UPDATE_VLAN_FILTERS:
        VMW_CBPRN("Set: Update VLAN filters");
        vmxnet3_update_vlan_filters(s);
        break;

    case VMXNET3_CMD_UPDATE_MAC_FILTERS:
        VMW_CBPRN("Set: Update MAC filters");
        vmxnet3_update_mcast_filters(s);
        break;

    case VMXNET3_CMD_UPDATE_FEATURE:
        VMW_CBPRN("Set: Update features");
        vmxnet3_update_features(s);
        break;

    case VMXNET3_CMD_UPDATE_PMCFG:
        VMW_CBPRN("Set: Update power management config");
        vmxnet3_update_pm_state(s);
        break;

    case VMXNET3_CMD_GET_LINK:
        VMW_CBPRN("Set: Get link");
        break;

    case VMXNET3_CMD_RESET_DEV:
        VMW_CBPRN("Set: Reset device");
        vmxnet3_reset(s);
        break;

    case VMXNET3_CMD_QUIESCE_DEV:
        VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - pause the device");
        vmxnet3_deactivate_device(s);
        break;

    case VMXNET3_CMD_GET_CONF_INTR:
        VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration");
        break;

    default:
        VMW_CBPRN("Received unknown command: %" PRIx64, cmd);
        break;
    }
}

static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
{
    uint64_t ret;

    switch (s->last_command) {
    case VMXNET3_CMD_ACTIVATE_DEV:
        ret = (s->device_active) ? 0 : -1;
        VMW_CFPRN("Device active: %" PRIx64, ret);
        break;

    case VMXNET3_CMD_RESET_DEV:
    case VMXNET3_CMD_QUIESCE_DEV:
    case VMXNET3_CMD_GET_QUEUE_STATUS:
        ret = 0;
        break;

    case VMXNET3_CMD_GET_LINK:
        ret = s->link_status_and_speed;
        VMW_CFPRN("Link and speed: %" PRIx64, ret);
        break;

    case VMXNET3_CMD_GET_PERM_MAC_LO:
        ret = vmxnet3_get_mac_low(&s->perm_mac);
        break;

    case VMXNET3_CMD_GET_PERM_MAC_HI:
        ret = vmxnet3_get_mac_high(&s->perm_mac);
        break;

    case VMXNET3_CMD_GET_CONF_INTR:
        ret = vmxnet3_get_interrupt_config(s);
        break;

    default:
        VMW_WRPRN("Received request for unknown command: %x", s->last_command);
        ret = -1;
        break;
    }

    return ret;
}
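
/*
 * Together, vmxnet3_handle_command() and vmxnet3_get_command_status()
 * implement the BAR1 command register protocol: the guest writes a
 * command code to VMXNET3_REG_CMD and then reads the result back from
 * the same register. A sketch of the guest-side sequence (assuming a
 * hypothetical pair of 32-bit BAR1 accessors):
 *
 *   bar1_write32(VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 *   uint32_t link = bar1_read32(VMXNET3_REG_CMD);
 *   bool     up   = link & VMXNET3_LINK_STATUS_UP;
 *   uint32_t mbps = link >> 16;            // e.g. 1000 for this device
 *
 * The device only latches the last command (s->last_command), so the
 * write and the read must not be interleaved with other commands.
 */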
1621 
1622 static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
1623 {
1624     uint32_t events;
1625 
1626     VMW_CBPRN("Setting events: 0x%x", val);
1627     events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
1628     VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
1629 }
1630 
1631 static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
1632 {
1633     uint32_t events;
1634 
1635     VMW_CBPRN("Clearing events: 0x%x", val);
1636     events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
1637     VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
1638 }
1639 
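/*
 * BAR1 carries the "virtual device" registers. A typical guest bring-up
 * sequence, as this model expects it, is sketched below (illustrative
 * pseudo-driver code with Linux-style MMIO accessors, not taken from any
 * real driver):
 *
 *   writel(shared_pa & 0xffffffff,   bar1 + VMXNET3_REG_DSAL);
 *   writel(shared_pa >> 32,          bar1 + VMXNET3_REG_DSAH);
 *   writel(VMXNET3_CMD_ACTIVATE_DEV, bar1 + VMXNET3_REG_CMD);
 *   if (readl(bar1 + VMXNET3_REG_CMD) != 0) {
 *       ... activation failed ...
 *   }
 */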
1640 static void
1641 vmxnet3_io_bar1_write(void *opaque,
1642                       hwaddr addr,
1643                       uint64_t val,
1644                       unsigned size)
1645 {
1646     VMXNET3State *s = opaque;
1647 
1648     switch (addr) {
1649     /* Vmxnet3 Revision Report Selection */
1650     case VMXNET3_REG_VRRS:
1651         VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d",
1652                   val, size);
1653         break;
1654 
1655     /* UPT Version Report Selection */
1656     case VMXNET3_REG_UVRS:
1657         VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d",
1658                   val, size);
1659         break;
1660 
1661     /* Driver Shared Address Low */
1662     case VMXNET3_REG_DSAL:
1663         VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d",
1664                   val, size);
1665         /*
1666          * Guest driver will first write the low part of the shared
1667          * memory address. We save it to temp variable and set the
1668          * shared address only after we get the high part
1669          */
1670         if (0 == val) {
1671             s->device_active = false;
1672         }
1673         s->temp_shared_guest_driver_memory = val;
1674         s->drv_shmem = 0;
1675         break;
1676 
1677     /* Driver Shared Address High */
1678     case VMXNET3_REG_DSAH:
1679         VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d",
1680                   val, size);
1681         /*
1682          * Set the shared memory between guest driver and device.
1683          * We already should have low address part.
1684          */
1685         s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
1686         break;
1687 
1688     /* Command */
1689     case VMXNET3_REG_CMD:
1690         VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d",
1691                   val, size);
1692         vmxnet3_handle_command(s, val);
1693         break;
1694 
1695     /* MAC Address Low */
1696     case VMXNET3_REG_MACL:
1697         VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d",
1698                   val, size);
1699         s->temp_mac = val;
1700         break;
1701 
1702     /* MAC Address High */
1703     case VMXNET3_REG_MACH:
1704         VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d",
1705                   val, size);
1706         vmxnet3_set_variable_mac(s, val, s->temp_mac);
1707         break;
1708 
1709     /* Interrupt Cause Register */
1710     case VMXNET3_REG_ICR:
1711         VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d",
1712                   val, size);
1713         g_assert_not_reached();
1714         break;
1715 
1716     /* Event Cause Register */
1717     case VMXNET3_REG_ECR:
1718         VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d",
1719                   val, size);
1720         vmxnet3_ack_events(s, val);
1721         break;
1722 
1723     default:
1724         VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d",
1725                   addr, val, size);
1726         break;
1727     }
1728 }
1729 
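/*
 * Reads from BAR1 are mostly status queries. Note the ICR case: reading
 * the Interrupt Cause Register both reports and deasserts the legacy
 * interrupt, which is why vector 0 is cleared as a side effect of the
 * read.
 */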
1730 static uint64_t
1731 vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size)
1732 {
1733     VMXNET3State *s = opaque;
1734     uint64_t ret = 0;
1735 
1736     switch (addr) {
1737     /* Vmxnet3 Revision Report Selection */
1738     case VMXNET3_REG_VRRS:
1739         VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d", size);
1740         ret = VMXNET3_DEVICE_REVISION;
1741         break;
1742 
1743     /* UPT Version Report Selection */
1744     case VMXNET3_REG_UVRS:
1745         VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d", size);
1746         ret = VMXNET3_DEVICE_VERSION;
1747         break;
1748 
1749     /* Command */
1750     case VMXNET3_REG_CMD:
1751         VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d", size);
1752         ret = vmxnet3_get_command_status(s);
1753         break;
1754 
1755     /* MAC Address Low */
1756     case VMXNET3_REG_MACL:
1757         VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d", size);
1758         ret = vmxnet3_get_mac_low(&s->conf.macaddr);
1759         break;
1760 
1761     /* MAC Address High */
1762     case VMXNET3_REG_MACH:
1763         VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d", size);
1764         ret = vmxnet3_get_mac_high(&s->conf.macaddr);
1765         break;
1766 
1767     /*
1768      * Interrupt Cause Register
1769      * Used for legacy interrupts only so interrupt index always 0
1770      */
1771     case VMXNET3_REG_ICR:
1772         VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d", size);
1773         if (vmxnet3_interrupt_asserted(s, 0)) {
1774             vmxnet3_clear_interrupt(s, 0);
1775             ret = true;
1776         } else {
1777             ret = false;
1778         }
1779         break;
1780 
1781     default:
1782         VMW_CBPRN("Unknown read BAR1 [%" PRIx64 "], %d bytes", addr, size);
1783         break;
1784     }
1785 
1786     return ret;
1787 }
1788 
1789 static int
1790 vmxnet3_can_receive(NetClientState *nc)
1791 {
1792     VMXNET3State *s = qemu_get_nic_opaque(nc);
1793     return s->device_active &&
1794            VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP);
1795 }
1796 
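/*
 * The VLAN filter is a plain bitmap indexed by VLAN ID. A minimal sketch
 * of the lookup, assuming the usual 32-bit-word layout behind
 * VMXNET3_VFTABLE_ENTRY_IS_SET:
 *
 *   word = vlan_table[vid >> 5];
 *   hit  = word & (1 << (vid & 0x1f));
 *
 * Special VLAN IDs (e.g. priority-tagged frames) always pass the filter.
 */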
1797 static inline bool
1798 vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
1799 {
1800     uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
1801     if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
1802         return true;
1803     }
1804 
1805     return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
1806 }
1807 
1808 static bool
1809 vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac)
1810 {
1811     int i;
1812     for (i = 0; i < s->mcast_list_len; i++) {
1813         if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) {
1814             return true;
1815         }
1816     }
1817     return false;
1818 }
1819 
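/*
 * RX filtering order: promiscuous mode accepts everything, then the VLAN
 * filter may reject the frame, and finally the per-type rx_mode bits
 * (unicast/broadcast/multicast, plus all-multi) decide whether the
 * packet is indicated to the guest.
 */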
1820 static bool
1821 vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data,
1822     size_t size)
1823 {
1824     struct eth_header *ehdr = PKT_GET_ETH_HDR(data);
1825 
1826     if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_PROMISC)) {
1827         return true;
1828     }
1829 
1830     if (!vmxnet3_is_registered_vlan(s, data)) {
1831         return false;
1832     }
1833 
1834     switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
1835     case ETH_PKT_UCAST:
1836         if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) {
1837             return false;
1838         }
1839         if (memcmp(s->conf.macaddr.a, ehdr->h_dest, ETH_ALEN)) {
1840             return false;
1841         }
1842         break;
1843 
1844     case ETH_PKT_BCAST:
1845         if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_BCAST)) {
1846             return false;
1847         }
1848         break;
1849 
1850     case ETH_PKT_MCAST:
1851         if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_ALL_MULTI)) {
1852             return true;
1853         }
1854         if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_MCAST)) {
1855             return false;
1856         }
1857         if (!vmxnet3_is_allowed_mcast_group(s, ehdr->h_dest)) {
1858             return false;
1859         }
1860         break;
1861 
1862     default:
1863         g_assert_not_reached();
1864     }
1865 
1866     return true;
1867 }
1868 
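/*
 * Entry point for packets from the peer. When the peer supplies a
 * virtio-net header, it is detached first so that filtering and
 * indication operate on the bare Ethernet frame. A negative return
 * reports a failed indication; returning the full size (even for
 * filtered packets) tells the net layer the packet was consumed.
 */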
1869 static ssize_t
1870 vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1871 {
1872     VMXNET3State *s = qemu_get_nic_opaque(nc);
1873     ssize_t bytes_indicated;
1874 
1875     if (!vmxnet3_can_receive(nc)) {
1876         VMW_PKPRN("Cannot receive now");
1877         return -1;
1878     }
1879 
1880     if (s->peer_has_vhdr) {
1881         vmxnet_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf);
1882         buf += sizeof(struct virtio_net_hdr);
1883         size -= sizeof(struct virtio_net_hdr);
1884     }
1885 
1886     vmxnet_rx_pkt_set_packet_type(s->rx_pkt,
1887         get_eth_packet_type(PKT_GET_ETH_HDR(buf)));
1888 
1889     if (vmxnet3_rx_filter_may_indicate(s, buf, size)) {
1890         vmxnet_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping);
1891         bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1;
1892         if (bytes_indicated < (ssize_t)size) {
1893             VMW_PKPRN("RX: %zd of %zu bytes indicated", bytes_indicated, size);
1894         }
1895     } else {
1896         VMW_PKPRN("Packet dropped by RX filter");
1897         bytes_indicated = size;
1898     }
1899 
1900     assert(size > 0);
1901     assert(bytes_indicated != 0);
1902     return bytes_indicated;
1903 }
1904 
1905 static void vmxnet3_cleanup(NetClientState *nc)
1906 {
1907     VMXNET3State *s = qemu_get_nic_opaque(nc);
1908     s->nic = NULL;
1909 }
1910 
1911 static void vmxnet3_set_link_status(NetClientState *nc)
1912 {
1913     VMXNET3State *s = qemu_get_nic_opaque(nc);
1914 
1915     if (nc->link_down) {
1916         s->link_status_and_speed &= ~VMXNET3_LINK_STATUS_UP;
1917     } else {
1918         s->link_status_and_speed |= VMXNET3_LINK_STATUS_UP;
1919     }
1920 
1921     vmxnet3_set_events(s, VMXNET3_ECR_LINK);
1922     vmxnet3_trigger_interrupt(s, s->event_int_idx);
1923 }
1924 
1925 static NetClientInfo net_vmxnet3_info = {
1926         .type = NET_CLIENT_OPTIONS_KIND_NIC,
1927         .size = sizeof(NICState),
1928         .can_receive = vmxnet3_can_receive,
1929         .receive = vmxnet3_receive,
1930         .cleanup = vmxnet3_cleanup,
1931         .link_status_changed = vmxnet3_set_link_status,
1932 };
1933 
1934 static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
1935 {
1936     NetClientState *nc = qemu_get_queue(s->nic);
1937 
1938     if (qemu_has_vnet_hdr(nc->peer)) {
1939         return true;
1940     }
1941 
1942     VMW_WRPRN("Peer has no virtio extension. Task offloads will be emulated.");
1943     return false;
1944 }
1945 
1946 static void vmxnet3_net_uninit(VMXNET3State *s)
1947 {
1948     g_free(s->mcast_list);
1949     vmxnet_tx_pkt_reset(s->tx_pkt);
1950     vmxnet_tx_pkt_uninit(s->tx_pkt);
1951     vmxnet_rx_pkt_uninit(s->rx_pkt);
1952     qemu_del_nic(s->nic);
1953 }
1954 
1955 static void vmxnet3_net_init(VMXNET3State *s)
1956 {
1957     DeviceState *d = DEVICE(s);
1958 
1959     VMW_CBPRN("vmxnet3_net_init called...");
1960 
1961     qemu_macaddr_default_if_unset(&s->conf.macaddr);
1962 
1963     /* Windows guest will query the address that was set on init */
1964     memcpy(&s->perm_mac.a, &s->conf.macaddr.a, sizeof(s->perm_mac.a));
1965 
1966     s->mcast_list = NULL;
1967     s->mcast_list_len = 0;
1968 
1969     s->link_status_and_speed = VMXNET3_LINK_SPEED | VMXNET3_LINK_STATUS_UP;
1970 
1971     VMW_CFPRN("Permanent MAC: " MAC_FMT, MAC_ARG(s->perm_mac.a));
1972 
1973     s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf,
1974                           object_get_typename(OBJECT(s)),
1975                           d->id, s);
1976 
1977     s->peer_has_vhdr = vmxnet3_peer_has_vnet_hdr(s);
1978     s->tx_sop = true;
1979     s->skip_current_tx_pkt = false;
1980     s->tx_pkt = NULL;
1981     s->rx_pkt = NULL;
1982     s->rx_vlan_stripping = false;
1983     s->lro_supported = false;
1984 
1985     if (s->peer_has_vhdr) {
1986         qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
1987             sizeof(struct virtio_net_hdr));
1988 
1989         qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
1990     }
1991 
1992     qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
1993 }
1994 
1995 static void
1996 vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors)
1997 {
1998     PCIDevice *d = PCI_DEVICE(s);
1999     int i;
2000     for (i = 0; i < num_vectors; i++) {
2001         msix_vector_unuse(d, i);
2002     }
2003 }
2004 
2005 static bool
2006 vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors)
2007 {
2008     PCIDevice *d = PCI_DEVICE(s);
2009     int i;
2010     for (i = 0; i < num_vectors; i++) {
2011         int res = msix_vector_use(d, i);
2012         if (0 > res) {
2013             VMW_WRPRN("Failed to use MSI-X vector %d, error %d", i, res);
2014             vmxnet3_unuse_msix_vectors(s, i);
2015             return false;
2016         }
2017     }
2018     return true;
2019 }
2020 
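/*
 * MSI-X setup: both the vector table and the PBA live in the dedicated
 * MSI-X BAR (offsets VMXNET3_OFF_MSIX_TABLE and VMXNET3_OFF_MSIX_PBA).
 * If initialization or vector allocation fails, s->msix_used stays
 * false and the device falls back to MSI, then to legacy INTx.
 */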
2021 static bool
2022 vmxnet3_init_msix(VMXNET3State *s)
2023 {
2024     PCIDevice *d = PCI_DEVICE(s);
2025     int res = msix_init(d, VMXNET3_MAX_INTRS,
2026                         &s->msix_bar,
2027                         VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE,
2028                         &s->msix_bar,
2029                         VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA,
2030                         0);
2031 
2032     if (0 > res) {
2033         VMW_WRPRN("Failed to initialize MSI-X, error %d", res);
2034         s->msix_used = false;
2035     } else {
2036         if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
2037             VMW_WRPRN("Failed to use MSI-X vectors");
2038             msix_uninit(d, &s->msix_bar, &s->msix_bar);
2039             s->msix_used = false;
2040         } else {
2041             s->msix_used = true;
2042         }
2043     }
2044     return s->msix_used;
2045 }
2046 
2047 static void
2048 vmxnet3_cleanup_msix(VMXNET3State *s)
2049 {
2050     PCIDevice *d = PCI_DEVICE(s);
2051 
2052     if (s->msix_used) {
2053         vmxnet3_unuse_msix_vectors(s, VMXNET3_MAX_INTRS);
2054         msix_uninit(d, &s->msix_bar, &s->msix_bar);
2055     }
2056 }
2057 
2058 #define VMXNET3_MSI_OFFSET        (0x50)
2059 #define VMXNET3_USE_64BIT         (true)
2060 #define VMXNET3_PER_VECTOR_MASK   (false)
2061 
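/*
 * MSI setup: a single 64-bit-capable vector without per-vector masking,
 * with the capability placed at config space offset VMXNET3_MSI_OFFSET.
 * Failure here is not fatal either; the INTA pin configured in
 * vmxnet3_pci_init() remains as the last resort.
 */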
2062 static bool
2063 vmxnet3_init_msi(VMXNET3State *s)
2064 {
2065     PCIDevice *d = PCI_DEVICE(s);
2066     int res;
2067 
2068     res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MAX_NMSIX_INTRS,
2069                    VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK);
2070     if (0 > res) {
2071         VMW_WRPRN("Failed to initialize MSI, error %d", res);
2072         s->msi_used = false;
2073     } else {
2074         s->msi_used = true;
2075     }
2076 
2077     return s->msi_used;
2078 }
2079 
2080 static void
2081 vmxnet3_cleanup_msi(VMXNET3State *s)
2082 {
2083     PCIDevice *d = PCI_DEVICE(s);
2084 
2085     if (s->msi_used) {
2086         msi_uninit(d);
2087     }
2088 }
2089 
2090 static void
2091 vmxnet3_msix_save(QEMUFile *f, void *opaque)
2092 {
2093     PCIDevice *d = PCI_DEVICE(opaque);
2094     msix_save(d, f);
2095 }
2096 
2097 static int
2098 vmxnet3_msix_load(QEMUFile *f, void *opaque, int version_id)
2099 {
2100     PCIDevice *d = PCI_DEVICE(opaque);
2101     msix_load(d, f);
2102     return 0;
2103 }
2104 
2105 static const MemoryRegionOps b0_ops = {
2106     .read = vmxnet3_io_bar0_read,
2107     .write = vmxnet3_io_bar0_write,
2108     .endianness = DEVICE_LITTLE_ENDIAN,
2109     .impl = {
2110             .min_access_size = 4,
2111             .max_access_size = 4,
2112     },
2113 };
2114 
2115 static const MemoryRegionOps b1_ops = {
2116     .read = vmxnet3_io_bar1_read,
2117     .write = vmxnet3_io_bar1_write,
2118     .endianness = DEVICE_LITTLE_ENDIAN,
2119     .impl = {
2120             .min_access_size = 4,
2121             .max_access_size = 4,
2122     },
2123 };
2124 
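/*
 * Device realization: BAR0 holds the passthrough registers (interrupt
 * masks and TX/RX doorbells), BAR1 the virtual device registers handled
 * above, and a separate memory BAR hosts the MSI-X structures. All
 * register accesses are funneled through 4-byte implementations (see
 * the .impl constraints above).
 */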
2125 static int vmxnet3_pci_init(PCIDevice *pci_dev)
2126 {
2127     DeviceState *dev = DEVICE(pci_dev);
2128     VMXNET3State *s = VMXNET3(pci_dev);
2129 
2130     VMW_CBPRN("Starting init...");
2131 
2132     memory_region_init_io(&s->bar0, OBJECT(s), &b0_ops, s,
2133                           "vmxnet3-b0", VMXNET3_PT_REG_SIZE);
2134     pci_register_bar(pci_dev, VMXNET3_BAR0_IDX,
2135                      PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0);
2136 
2137     memory_region_init_io(&s->bar1, OBJECT(s), &b1_ops, s,
2138                           "vmxnet3-b1", VMXNET3_VD_REG_SIZE);
2139     pci_register_bar(pci_dev, VMXNET3_BAR1_IDX,
2140                      PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1);
2141 
2142     memory_region_init(&s->msix_bar, OBJECT(s), "vmxnet3-msix-bar",
2143                        VMXNET3_MSIX_BAR_SIZE);
2144     pci_register_bar(pci_dev, VMXNET3_MSIX_BAR_IDX,
2145                      PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar);
2146 
2147     vmxnet3_reset_interrupt_states(s);
2148 
2149     /* Interrupt pin A */
2150     pci_dev->config[PCI_INTERRUPT_PIN] = 0x01;
2151 
2152     if (!vmxnet3_init_msix(s)) {
2153         VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent.");
2154     }
2155 
2156     if (!vmxnet3_init_msi(s)) {
2157         VMW_WRPRN("Failed to initialize MSI, configuration is inconsistent.");
2158     }
2159 
2160     vmxnet3_net_init(s);
2161 
2162     register_savevm(dev, "vmxnet3-msix", -1, 1,
2163                     vmxnet3_msix_save, vmxnet3_msix_load, s);
2164 
2165     add_boot_device_path(s->conf.bootindex, dev, "/ethernet-phy@0");
2166 
2167     return 0;
2168 }
2169 
2170 
2171 static void vmxnet3_pci_uninit(PCIDevice *pci_dev)
2172 {
2173     DeviceState *dev = DEVICE(pci_dev);
2174     VMXNET3State *s = VMXNET3(pci_dev);
2175 
2176     VMW_CBPRN("Starting uninit...");
2177 
2178     unregister_savevm(dev, "vmxnet3-msix", s);
2179 
2180     vmxnet3_net_uninit(s);
2181 
2182     vmxnet3_cleanup_msix(s);
2183 
2184     vmxnet3_cleanup_msi(s);
2185 }
2186 
2187 static void vmxnet3_qdev_reset(DeviceState *dev)
2188 {
2189     PCIDevice *d = PCI_DEVICE(dev);
2190     VMXNET3State *s = VMXNET3(d);
2191 
2192     VMW_CBPRN("Starting QDEV reset...");
2193     vmxnet3_reset(s);
2194 }
2195 
2196 static bool vmxnet3_mc_list_needed(void *opaque)
2197 {
2198     return true;
2199 }
2200 
2201 static int vmxnet3_mcast_list_pre_load(void *opaque)
2202 {
2203     VMXNET3State *s = opaque;
2204 
2205     s->mcast_list = g_malloc(s->mcast_list_buff_size);
2206 
2207     return 0;
2208 }
2209 
2210 
2211 static void vmxnet3_pre_save(void *opaque)
2212 {
2213     VMXNET3State *s = opaque;
2214 
2215     s->mcast_list_buff_size = s->mcast_list_len * sizeof(MACAddr);
2216 }
2217 
2218 static const VMStateDescription vmstate_vmxnet3_mcast_list = {
2219     .name = "vmxnet3/mcast_list",
2220     .version_id = 1,
2221     .minimum_version_id = 1,
2222     .pre_load = vmxnet3_mcast_list_pre_load,
2223     .fields = (VMStateField[]) {
2224         VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 0,
2225             mcast_list_buff_size),
2226         VMSTATE_END_OF_LIST()
2227     }
2228 };
2229 
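/*
 * Ring state is serialized field by field in a fixed order (pa, size,
 * cell_size, next, gen); the get/put pairs below must stay exact mirror
 * images of each other or migration streams become unreadable.
 */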
2230 static void vmxnet3_get_ring_from_file(QEMUFile *f, Vmxnet3Ring *r)
2231 {
2232     r->pa = qemu_get_be64(f);
2233     r->size = qemu_get_be32(f);
2234     r->cell_size = qemu_get_be32(f);
2235     r->next = qemu_get_be32(f);
2236     r->gen = qemu_get_byte(f);
2237 }
2238 
2239 static void vmxnet3_put_ring_to_file(QEMUFile *f, Vmxnet3Ring *r)
2240 {
2241     qemu_put_be64(f, r->pa);
2242     qemu_put_be32(f, r->size);
2243     qemu_put_be32(f, r->cell_size);
2244     qemu_put_be32(f, r->next);
2245     qemu_put_byte(f, r->gen);
2246 }
2247 
2248 static void vmxnet3_get_tx_stats_from_file(QEMUFile *f,
2249     struct UPT1_TxStats *tx_stat)
2250 {
2251     tx_stat->TSOPktsTxOK = qemu_get_be64(f);
2252     tx_stat->TSOBytesTxOK = qemu_get_be64(f);
2253     tx_stat->ucastPktsTxOK = qemu_get_be64(f);
2254     tx_stat->ucastBytesTxOK = qemu_get_be64(f);
2255     tx_stat->mcastPktsTxOK = qemu_get_be64(f);
2256     tx_stat->mcastBytesTxOK = qemu_get_be64(f);
2257     tx_stat->bcastPktsTxOK = qemu_get_be64(f);
2258     tx_stat->bcastBytesTxOK = qemu_get_be64(f);
2259     tx_stat->pktsTxError = qemu_get_be64(f);
2260     tx_stat->pktsTxDiscard = qemu_get_be64(f);
2261 }
2262 
2263 static void vmxnet3_put_tx_stats_to_file(QEMUFile *f,
2264     struct UPT1_TxStats *tx_stat)
2265 {
2266     qemu_put_be64(f, tx_stat->TSOPktsTxOK);
2267     qemu_put_be64(f, tx_stat->TSOBytesTxOK);
2268     qemu_put_be64(f, tx_stat->ucastPktsTxOK);
2269     qemu_put_be64(f, tx_stat->ucastBytesTxOK);
2270     qemu_put_be64(f, tx_stat->mcastPktsTxOK);
2271     qemu_put_be64(f, tx_stat->mcastBytesTxOK);
2272     qemu_put_be64(f, tx_stat->bcastPktsTxOK);
2273     qemu_put_be64(f, tx_stat->bcastBytesTxOK);
2274     qemu_put_be64(f, tx_stat->pktsTxError);
2275     qemu_put_be64(f, tx_stat->pktsTxDiscard);
2276 }
2277 
2278 static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
2279 {
2280     Vmxnet3TxqDescr *r = pv;
2281 
2282     vmxnet3_get_ring_from_file(f, &r->tx_ring);
2283     vmxnet3_get_ring_from_file(f, &r->comp_ring);
2284     r->intr_idx = qemu_get_byte(f);
2285     r->tx_stats_pa = qemu_get_be64(f);
2286 
2287     vmxnet3_get_tx_stats_from_file(f, &r->txq_stats);
2288 
2289     return 0;
2290 }
2291 
2292 static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size)
2293 {
2294     Vmxnet3TxqDescr *r = pv;
2295 
2296     vmxnet3_put_ring_to_file(f, &r->tx_ring);
2297     vmxnet3_put_ring_to_file(f, &r->comp_ring);
2298     qemu_put_byte(f, r->intr_idx);
2299     qemu_put_be64(f, r->tx_stats_pa);
2300     vmxnet3_put_tx_stats_to_file(f, &r->txq_stats);
2301 }
2302 
2303 static const VMStateInfo txq_descr_info = {
2304     .name = "txq_descr",
2305     .get = vmxnet3_get_txq_descr,
2306     .put = vmxnet3_put_txq_descr
2307 };
2308 
2309 static void vmxnet3_get_rx_stats_from_file(QEMUFile *f,
2310     struct UPT1_RxStats *rx_stat)
2311 {
2312     rx_stat->LROPktsRxOK = qemu_get_be64(f);
2313     rx_stat->LROBytesRxOK = qemu_get_be64(f);
2314     rx_stat->ucastPktsRxOK = qemu_get_be64(f);
2315     rx_stat->ucastBytesRxOK = qemu_get_be64(f);
2316     rx_stat->mcastPktsRxOK = qemu_get_be64(f);
2317     rx_stat->mcastBytesRxOK = qemu_get_be64(f);
2318     rx_stat->bcastPktsRxOK = qemu_get_be64(f);
2319     rx_stat->bcastBytesRxOK = qemu_get_be64(f);
2320     rx_stat->pktsRxOutOfBuf = qemu_get_be64(f);
2321     rx_stat->pktsRxError = qemu_get_be64(f);
2322 }
2323 
2324 static void vmxnet3_put_rx_stats_to_file(QEMUFile *f,
2325     struct UPT1_RxStats *rx_stat)
2326 {
2327     qemu_put_be64(f, rx_stat->LROPktsRxOK);
2328     qemu_put_be64(f, rx_stat->LROBytesRxOK);
2329     qemu_put_be64(f, rx_stat->ucastPktsRxOK);
2330     qemu_put_be64(f, rx_stat->ucastBytesRxOK);
2331     qemu_put_be64(f, rx_stat->mcastPktsRxOK);
2332     qemu_put_be64(f, rx_stat->mcastBytesRxOK);
2333     qemu_put_be64(f, rx_stat->bcastPktsRxOK);
2334     qemu_put_be64(f, rx_stat->bcastBytesRxOK);
2335     qemu_put_be64(f, rx_stat->pktsRxOutOfBuf);
2336     qemu_put_be64(f, rx_stat->pktsRxError);
2337 }
2338 
2339 static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
2340 {
2341     Vmxnet3RxqDescr *r = pv;
2342     int i;
2343 
2344     for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
2345         vmxnet3_get_ring_from_file(f, &r->rx_ring[i]);
2346     }
2347 
2348     vmxnet3_get_ring_from_file(f, &r->comp_ring);
2349     r->intr_idx = qemu_get_byte(f);
2350     r->rx_stats_pa = qemu_get_be64(f);
2351 
2352     vmxnet3_get_rx_stats_from_file(f, &r->rxq_stats);
2353 
2354     return 0;
2355 }
2356 
2357 static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size)
2358 {
2359     Vmxnet3RxqDescr *r = pv;
2360     int i;
2361 
2362     for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
2363         vmxnet3_put_ring_to_file(f, &r->rx_ring[i]);
2364     }
2365 
2366     vmxnet3_put_ring_to_file(f, &r->comp_ring);
2367     qemu_put_byte(f, r->intr_idx);
2368     qemu_put_be64(f, r->rx_stats_pa);
2369     vmxnet3_put_rx_stats_to_file(f, &r->rxq_stats);
2370 }
2371 
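/*
 * After an incoming migration the runtime-only state has to be rebuilt:
 * packet contexts are re-initialized from the migrated limits and, when
 * MSI-X was in use on the source, vector usage is re-acquired on the
 * destination.
 */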
2372 static int vmxnet3_post_load(void *opaque, int version_id)
2373 {
2374     VMXNET3State *s = opaque;
2375     PCIDevice *d = PCI_DEVICE(s);
2376 
2377     vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
2378     vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
2379 
2380     if (s->msix_used) {
2381         if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
2382             VMW_WRPRN("Failed to re-use MSI-X vectors");
2383             msix_uninit(d, &s->msix_bar, &s->msix_bar);
2384             s->msix_used = false;
2385             return -1;
2386         }
2387     }
2388 
2389     vmxnet3_validate_queues(s);
2390     vmxnet3_validate_interrupts(s);
2391 
2392     return 0;
2393 }
2394 
2395 static const VMStateInfo rxq_descr_info = {
2396     .name = "rxq_descr",
2397     .get = vmxnet3_get_rxq_descr,
2398     .put = vmxnet3_put_rxq_descr
2399 };
2400 
2401 static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
2402 {
2403     Vmxnet3IntState *r = pv;
2404 
2405     r->is_masked = qemu_get_byte(f);
2406     r->is_pending = qemu_get_byte(f);
2407     r->is_asserted = qemu_get_byte(f);
2408 
2409     return 0;
2410 }
2411 
2412 static void vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size)
2413 {
2414     Vmxnet3IntState *r = pv;
2415 
2416     qemu_put_byte(f, r->is_masked);
2417     qemu_put_byte(f, r->is_pending);
2418     qemu_put_byte(f, r->is_asserted);
2419 }
2420 
2421 static const VMStateInfo int_state_info = {
2422     .name = "int_state",
2423     .get = vmxnet3_get_int_state,
2424     .put = vmxnet3_put_int_state
2425 };
2426 
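/*
 * Main migration descriptor. Fixed-size queue and interrupt arrays are
 * serialized through the custom VMStateInfo handlers above; the
 * variable-length multicast list travels in a subsection whose buffer
 * size is computed in vmxnet3_pre_save().
 */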
2427 static const VMStateDescription vmstate_vmxnet3 = {
2428     .name = "vmxnet3",
2429     .version_id = 1,
2430     .minimum_version_id = 1,
2431     .pre_save = vmxnet3_pre_save,
2432     .post_load = vmxnet3_post_load,
2433     .fields = (VMStateField[]) {
2434             VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State),
2435             VMSTATE_BOOL(rx_packets_compound, VMXNET3State),
2436             VMSTATE_BOOL(rx_vlan_stripping, VMXNET3State),
2437             VMSTATE_BOOL(lro_supported, VMXNET3State),
2438             VMSTATE_UINT32(rx_mode, VMXNET3State),
2439             VMSTATE_UINT32(mcast_list_len, VMXNET3State),
2440             VMSTATE_UINT32(mcast_list_buff_size, VMXNET3State),
2441             VMSTATE_UINT32_ARRAY(vlan_table, VMXNET3State, VMXNET3_VFT_SIZE),
2442             VMSTATE_UINT32(mtu, VMXNET3State),
2443             VMSTATE_UINT16(max_rx_frags, VMXNET3State),
2444             VMSTATE_UINT32(max_tx_frags, VMXNET3State),
2445             VMSTATE_UINT8(event_int_idx, VMXNET3State),
2446             VMSTATE_BOOL(auto_int_masking, VMXNET3State),
2447             VMSTATE_UINT8(txq_num, VMXNET3State),
2448             VMSTATE_UINT8(rxq_num, VMXNET3State),
2449             VMSTATE_UINT32(device_active, VMXNET3State),
2450             VMSTATE_UINT32(last_command, VMXNET3State),
2451             VMSTATE_UINT32(link_status_and_speed, VMXNET3State),
2452             VMSTATE_UINT32(temp_mac, VMXNET3State),
2453             VMSTATE_UINT64(drv_shmem, VMXNET3State),
2454             VMSTATE_UINT64(temp_shared_guest_driver_memory, VMXNET3State),
2455 
2456             VMSTATE_ARRAY(txq_descr, VMXNET3State,
2457                 VMXNET3_DEVICE_MAX_TX_QUEUES, 0, txq_descr_info,
2458                 Vmxnet3TxqDescr),
2459             VMSTATE_ARRAY(rxq_descr, VMXNET3State,
2460                 VMXNET3_DEVICE_MAX_RX_QUEUES, 0, rxq_descr_info,
2461                 Vmxnet3RxqDescr),
2462             VMSTATE_ARRAY(interrupt_states, VMXNET3State, VMXNET3_MAX_INTRS,
2463                 0, int_state_info, Vmxnet3IntState),
2464 
2465             VMSTATE_END_OF_LIST()
2466     },
2467     .subsections = (VMStateSubsection[]) {
2468         {
2469             .vmsd = &vmstate_vmxnet3_mcast_list,
2470             .needed = vmxnet3_mc_list_needed
2471         },
2472         {
2473             /* empty element. */
2474         }
2475     }
2476 };
2477 
2478 static void
2479 vmxnet3_write_config(PCIDevice *pci_dev, uint32_t addr, uint32_t val, int len)
2480 {
2481     pci_default_write_config(pci_dev, addr, val, len);
2482     msix_write_config(pci_dev, addr, val, len);
2483     msi_write_config(pci_dev, addr, val, len);
2484 }
2485 
2486 static Property vmxnet3_properties[] = {
2487     DEFINE_NIC_PROPERTIES(VMXNET3State, conf),
2488     DEFINE_PROP_END_OF_LIST(),
2489 };
2490 
2491 static void vmxnet3_class_init(ObjectClass *class, void *data)
2492 {
2493     DeviceClass *dc = DEVICE_CLASS(class);
2494     PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
2495 
2496     c->init = vmxnet3_pci_init;
2497     c->exit = vmxnet3_pci_uninit;
2498     c->vendor_id = PCI_VENDOR_ID_VMWARE;
2499     c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
2500     c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION;
2501     c->class_id = PCI_CLASS_NETWORK_ETHERNET;
2502     c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
2503     c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
2504     c->config_write = vmxnet3_write_config;
2505     dc->desc = "VMWare Paravirtualized Ethernet v3";
2506     dc->reset = vmxnet3_qdev_reset;
2507     dc->vmsd = &vmstate_vmxnet3;
2508     dc->props = vmxnet3_properties;
2509     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2510 }
2511 
2512 static const TypeInfo vmxnet3_info = {
2513     .name          = TYPE_VMXNET3,
2514     .parent        = TYPE_PCI_DEVICE,
2515     .instance_size = sizeof(VMXNET3State),
2516     .class_init    = vmxnet3_class_init,
2517 };
2518 
2519 static void vmxnet3_register_types(void)
2520 {
2521     VMW_CBPRN("vmxnet3_register_types called...");
2522     type_register_static(&vmxnet3_info);
2523 }
2524 
2525 type_init(vmxnet3_register_types)
2526