/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/announce.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-events-net.h"
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"
#include "standard-headers/linux/ethtool.h"
#include "trace.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE

#define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */

#define VIRTIO_NET_TCP_FLAG         0x3F
#define VIRTIO_NET_TCP_HDR_LENGTH   0xF000

/* IPv4 max payload, 16 bits in the header */
#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
/* Header length field value (in 32-bit words) for an IPv4 header without options */
#define VIRTIO_NET_IP4_HEADER_LENGTH 5

#define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD

/* Purge coalesced packets timer interval. This value affects performance
   a lot and should be tuned carefully; 300000 (300us) is the recommended
   value to pass the WHQL test, while 50000 can double netperf throughput
   with tso/gso/gro off. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000

/* temporary until the standard header includes it */
#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)

#define VIRTIO_NET_HDR_F_RSC_INFO  4 /* rsc_ext data in csum_ fields */
#define VIRTIO_NET_F_RSC_EXT       61

static inline __virtio16 *virtio_net_rsc_ext_num_packets(
    struct virtio_net_hdr *hdr)
{
    return &hdr->csum_start;
}

static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
    struct virtio_net_hdr *hdr)
{
    return &hdr->csum_offset;
}

#endif

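/*
 * feature_sizes maps each feature bit to the end offset of its field in
 * struct virtio_net_config.  The active config size (n->config_size) is
 * the largest .end among the offered features, so the config space only
 * exposes fields the device actually advertises.
 */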
static VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_MAC,
     .end = virtio_endof(struct virtio_net_config, mac)},
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = virtio_endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1ULL << VIRTIO_NET_F_MTU,
     .end = virtio_endof(struct virtio_net_config, mtu)},
    {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
     .end = virtio_endof(struct virtio_net_config, duplex)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

110 {
111     return queue_index / 2;
112 }
113 
114 /* TODO
115  * - we could suppress RX interrupt if we were so inclined.
116  */
117 
118 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
119 {
120     VirtIONet *n = VIRTIO_NET(vdev);
121     struct virtio_net_config netcfg;
122 
123     virtio_stw_p(vdev, &netcfg.status, n->status);
124     virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
125     virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
126     memcpy(netcfg.mac, n->mac, ETH_ALEN);
127     virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
128     netcfg.duplex = n->net_conf.duplex;
129     memcpy(config, &netcfg, n->config_size);
130 }
131 
132 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
133 {
134     VirtIONet *n = VIRTIO_NET(vdev);
135     struct virtio_net_config netcfg = {};
136 
137     memcpy(&netcfg, config, n->config_size);
138 
139     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
140         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
141         memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
142         memcpy(n->mac, netcfg.mac, ETH_ALEN);
143         qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
144     }
145 }
146 
147 static bool virtio_net_started(VirtIONet *n, uint8_t status)
148 {
149     VirtIODevice *vdev = VIRTIO_DEVICE(n);
150     return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
151         (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
152 }
153 
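/*
 * Self-announce support: setting VIRTIO_NET_S_ANNOUNCE in the config status
 * asks the guest to send gratuitous packets (e.g. after migration).  The
 * guest acknowledges with VIRTIO_NET_CTRL_ANNOUNCE_ACK, and announce_timer
 * paces the remaining announcement rounds.
 */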
static void virtio_net_announce_notify(VirtIONet *net)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(net);
    trace_virtio_net_announce_notify();

    net->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    trace_virtio_net_announce_timer(n->announce_timer.round);

    n->announce_timer.round--;
    virtio_net_announce_notify(n);
}

static void virtio_net_announce(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /*
     * Make sure the virtio migration announcement timer isn't running.
     * If it is, let it trigger the announcement so that we do not cause
     * confusion.
     */
    if (n->announce_timer.round) {
        return;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        virtio_net_announce_notify(n);
    }
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%u byte MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

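/*
 * Propagate the requested vnet header endianness to every queue's peer.
 * On failure while enabling, roll back the queues already switched and
 * return true so the caller falls back to swapping headers in QEMU
 * (needs_vnet_hdr_swap); return false on success.
 */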
static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back to fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

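    /*
     * Evaluate the status queue by queue: without multiqueue only queue 0
     * is live, and with it only the first curr_queues pairs are; the rest
     * see a status of 0 so their pending TX work is cancelled below.
     */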
    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                vdev->vm_running) {
                /* If tx is waiting we likely have some packets in the
                 * tx queue and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path);
        g_free(path);

        /* disable event notification to avoid event flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

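/*
 * n->vlans is a MAX_VLAN-bit bitmap kept in 32-bit words (word vid >> 5,
 * bit vid & 0x1f).  Walk it and return the VIDs that are set as a QAPI
 * intList for query-rx-filter.
 */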
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer.tm);
    n->announce_timer.round = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);

    /* Flush any async TX */
    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (nc->peer) {
            qemu_flush_or_purge_queued_packets(nc->peer, true);
            assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
        }
    }
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user don't support max queue size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    return VIRTQUEUE_MAX_SIZE;
}

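/*
 * Attach or detach a queue pair's backend.  For a vhost-user peer this
 * toggles the vring; for a tap peer it enables or disables the queue fd.
 * virtio_net_set_queues() keeps the first curr_queues pairs attached and
 * detaches the rest.
 */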
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queues == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* First, sync all features that virtio-net could possibly support */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }

    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        offloads = virtio_ldq_p(vdev, &offloads);

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
        n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

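    /*
     * VIRTIO_NET_CTRL_MAC_TABLE_SET carries two tables, unicast then
     * multicast, each laid out as a 32-bit entry count followed by that
     * many 6-byte MAC addresses.
     */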
    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    trace_virtio_net_handle_announce(n->announce_timer.round);
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_timer.round) {
            qemu_announce_timer_step(&n->announce_timer);
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

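        /*
         * Work on a copy of the out iovec: iov_discard_front() below
         * modifies the iovec array, and elem->out_sg must stay intact
         * for virtqueue_push().
         */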
        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

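/*
 * The vnet header is guest-endian.  When the backend could not be switched
 * to the guest's endianness (needs_vnet_hdr_swap), the multi-byte fields
 * are byteswapped here instead.
 */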
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
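/*
 * The byte offsets below assume an untagged Ethernet frame with a 20-byte
 * (option-less) IPv4 header: buf[12..13] is the ethertype, buf[23] the IP
 * protocol field (14 + 9), and buf[34..35] the UDP source port (14 + 20).
 */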
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
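            /*
             * Record where num_buffers sits in the first buffer; it is
             * patched with the final buffer count once the loop is done.
             */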
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
                                     size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}

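/*
 * Receive Segment Coalescing (RSC, VIRTIO_NET_F_RSC_EXT): consecutive TCP
 * segments of the same flow are merged into one larger segment before
 * being handed to the guest.  A VirtioNetRscUnit describes the IP/TCP
 * headers and payload of one cached or incoming packet.
 */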
static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    uint16_t ip_hdrlen;
    struct ip_header *ip;

    ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
                              + sizeof(struct eth_header));
    unit->ip = (void *)ip;
    ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
    unit->ip_plen = &ip->ip_len;
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
    unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
}

static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    struct ip6_header *ip6;

    ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
                                + sizeof(struct eth_header));
    unit->ip = ip6;
    unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
                                      + sizeof(struct ip6_header));
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;

    /* There is a difference between the payload length in IPv4 and IPv6:
       the IP header is not included in the IPv6 payload length */
    unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
}

static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
                                       VirtioNetRscSeg *seg)
{
    int ret;
    struct virtio_net_hdr *h;

    h = (struct virtio_net_hdr *)seg->buf;
    h->flags = 0;
    h->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (seg->is_coalesced) {
        *virtio_net_rsc_ext_num_packets(h) = seg->packets;
        *virtio_net_rsc_ext_num_dupacks(h) = seg->dup_ack;
        h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
        if (chain->proto == ETH_P_IP) {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
        } else {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
        }
    }

    ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
    QTAILQ_REMOVE(&chain->buffers, seg, next);
    g_free(seg->buf);
    g_free(seg);

    return ret;
}

static void virtio_net_rsc_purge(void *opq)
{
    VirtioNetRscSeg *seg, *rn;
    VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;

    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
        if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
            chain->stat.purge_failed++;
            continue;
        }
    }

    chain->stat.timer++;
    if (!QTAILQ_EMPTY(&chain->buffers)) {
        timer_mod(chain->drain_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
    }
}

static void virtio_net_rsc_cleanup(VirtIONet *n)
{
    VirtioNetRscChain *chain, *rn_chain;
    VirtioNetRscSeg *seg, *rn_seg;

    QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
        QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
            QTAILQ_REMOVE(&chain->buffers, seg, next);
            g_free(seg->buf);
            g_free(seg);
        }

        timer_del(chain->drain_timer);
        timer_free(chain->drain_timer);
        QTAILQ_REMOVE(&n->rsc_chains, chain, next);
        g_free(chain);
    }
}

static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
                                     NetClientState *nc,
                                     const uint8_t *buf, size_t size)
{
    uint16_t hdr_len;
    VirtioNetRscSeg *seg;

    hdr_len = chain->n->guest_hdr_len;
    seg = g_malloc(sizeof(VirtioNetRscSeg));
    seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
        + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
    memcpy(seg->buf, buf, size);
    seg->size = size;
    seg->packets = 1;
    seg->dup_ack = 0;
    seg->is_coalesced = 0;
    seg->nc = nc;

    QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
    chain->stat.cache++;

    switch (chain->proto) {
    case ETH_P_IP:
        virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
        break;
    case ETH_P_IPV6:
        virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
        break;
    default:
        g_assert_not_reached();
    }
}

static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
                                         VirtioNetRscSeg *seg,
                                         const uint8_t *buf,
                                         struct tcp_header *n_tcp,
                                         struct tcp_header *o_tcp)
{
    uint32_t nack, oack;
    uint16_t nwin, owin;

    nack = htonl(n_tcp->th_ack);
    nwin = htons(n_tcp->th_win);
    oack = htonl(o_tcp->th_ack);
    owin = htons(o_tcp->th_win);

    if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.ack_out_of_win++;
        return RSC_FINAL;
    } else if (nack == oack) {
        /* duplicated ack or window probe */
        if (nwin == owin) {
            /* duplicated ack; count it, as the WHQL test exercises dup
             * acks up to 1 */
            chain->stat.dup_ack++;
            return RSC_FINAL;
        } else {
            /* Coalesce window update */
            o_tcp->th_win = n_tcp->th_win;
            chain->stat.win_update++;
            return RSC_COALESCE;
        }
    } else {
        /* pure ack, go to 'C', finalize */
        chain->stat.pure_ack++;
        return RSC_FINAL;
    }
}

static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
                                            VirtioNetRscSeg *seg,
                                            const uint8_t *buf,
                                            VirtioNetRscUnit *n_unit)
{
    void *data;
    uint16_t o_ip_len;
    uint32_t nseq, oseq;
    VirtioNetRscUnit *o_unit;

    o_unit = &seg->unit;
    o_ip_len = htons(*o_unit->ip_plen);
    nseq = htonl(n_unit->tcp->th_seq);
    oseq = htonl(o_unit->tcp->th_seq);

    /* out of order or retransmitted. */
    if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.data_out_of_win++;
        return RSC_FINAL;
    }

    data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
    if (nseq == oseq) {
        if ((o_unit->payload == 0) && n_unit->payload) {
            /* From no payload to payload, normal case, not a dup ack or etc */
            chain->stat.data_after_pure_ack++;
            goto coalesce;
        } else {
            return virtio_net_rsc_handle_ack(chain, seg, buf,
                                             n_unit->tcp, o_unit->tcp);
        }
    } else if ((nseq - oseq) != o_unit->payload) {
        /* Not a consistent packet, out of order */
        chain->stat.data_out_of_order++;
        return RSC_FINAL;
    } else {
coalesce:
        if ((o_ip_len + n_unit->payload) > chain->max_payload) {
            chain->stat.over_size++;
            return RSC_FINAL;
        }

        /* The data is in order; the payload length field differs between
           v4 and v6, so use the field value to update and record the new
           data length */
        o_unit->payload += n_unit->payload; /* update new data len */

        /* update field in ip header */
        *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);

        /* Carry the latest 'PUSH' flag: the WHQL test guide says 'PUSH'
           can be coalesced for a Windows guest, while this may change the
           behavior of a Linux guest (only if it uses the RSC feature). */
1601         o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
1602 
1603         o_unit->tcp->th_ack = n_unit->tcp->th_ack;
1604         o_unit->tcp->th_win = n_unit->tcp->th_win;
1605 
1606         memmove(seg->buf + seg->size, data, n_unit->payload);
1607         seg->size += n_unit->payload;
1608         seg->packets++;
1609         chain->stat.coalesced++;
1610         return RSC_COALESCE;
1611     }
1612 }
1613 
1614 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
1615                                         VirtioNetRscSeg *seg,
1616                                         const uint8_t *buf, size_t size,
1617                                         VirtioNetRscUnit *unit)
1618 {
1619     struct ip_header *ip1, *ip2;
1620 
1621     ip1 = (struct ip_header *)(unit->ip);
1622     ip2 = (struct ip_header *)(seg->unit.ip);
1623     if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
1624         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
1625         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
1626         chain->stat.no_match++;
1627         return RSC_NO_MATCH;
1628     }
1629 
1630     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
1631 }
1632 
1633 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
1634                                         VirtioNetRscSeg *seg,
1635                                         const uint8_t *buf, size_t size,
1636                                         VirtioNetRscUnit *unit)
1637 {
1638     struct ip6_header *ip1, *ip2;
1639 
1640     ip1 = (struct ip6_header *)(unit->ip);
1641     ip2 = (struct ip6_header *)(seg->unit.ip);
1642     if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
1643         || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
1644         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
1645         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
1646         chain->stat.no_match++;
1647         return RSC_NO_MATCH;
1648     }
1649 
1650     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
1651 }
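
/*
 * The XOR chains in the two matchers above exploit that (a ^ b) != 0 exactly
 * when a != b, so OR-ing per-field XORs tests the flow tuple without any
 * byte swapping (equality is endianness-independent).  An equivalent sketch
 * for the port pair (illustrative only, not used by the device):
 */
static inline bool G_GNUC_UNUSED
virtio_net_rsc_ports_match_sketch(const struct tcp_header *t1,
                                  const struct tcp_header *t2)
{
    return ((t1->th_sport ^ t2->th_sport) |
            (t1->th_dport ^ t2->th_dport)) == 0;
}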
1652 
1653 /* Packets with 'SYN' bypass coalescing; other control flags force a drain
1654  * first, to prevent out-of-order delivery */
1655 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
1656                                          struct tcp_header *tcp)
1657 {
1658     uint16_t tcp_hdr;
1659     uint16_t tcp_flag;
1660 
1661     tcp_flag = htons(tcp->th_offset_flags);
1662     /* data offset: bits 12-15, in 32-bit words; >> 12 then * 4 => >> 10 */
1663     tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
1664     tcp_flag &= VIRTIO_NET_TCP_FLAG;
1665     if (tcp_flag & TH_SYN) {
1666         chain->stat.tcp_syn++;
1667         return RSC_BYPASS;
1668     }
1669 
1670     if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
1671         chain->stat.tcp_ctrl_drain++;
1672         return RSC_FINAL;
1673     }
1674 
1675     if (tcp_hdr > sizeof(struct tcp_header)) {
1676         chain->stat.tcp_all_opt++;
1677         return RSC_FINAL;
1678     }
1679 
1680     return RSC_CANDIDATE;
1681 }
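
/*
 * Worked example for the decoding above, assuming a host-order
 * th_offset_flags of 0x5018:
 *   header length: (0x5018 & 0xF000) >> 10 = 20 bytes
 *                  (data offset 5, in 32-bit words, at bits 12-15);
 *   flags:         0x5018 & VIRTIO_NET_TCP_FLAG = 0x18 = TH_PUSH | TH_ACK,
 *                  which is neither SYN nor a drain-forcing flag, so such a
 *                  segment is an RSC_CANDIDATE.
 * Note that TH_ECE and TH_CWR lie above the 0x3F mask, so the drain test
 * can only fire on FIN/URG/RST.
 */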
1682 
1683 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
1684                                          NetClientState *nc,
1685                                          const uint8_t *buf, size_t size,
1686                                          VirtioNetRscUnit *unit)
1687 {
1688     int ret;
1689     VirtioNetRscSeg *seg, *nseg;
1690 
1691     if (QTAILQ_EMPTY(&chain->buffers)) {
1692         chain->stat.empty_cache++;
1693         virtio_net_rsc_cache_buf(chain, nc, buf, size);
1694         timer_mod(chain->drain_timer,
1695               qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
1696         return size;
1697     }
1698 
1699     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
1700         if (chain->proto == ETH_P_IP) {
1701             ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
1702         } else {
1703             ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
1704         }
1705 
1706         if (ret == RSC_FINAL) {
1707             if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1708                 /* Send failed */
1709                 chain->stat.final_failed++;
1710                 return 0;
1711             }
1712 
1713             /* Send current packet */
1714             return virtio_net_do_receive(nc, buf, size);
1715         } else if (ret == RSC_NO_MATCH) {
1716             continue;
1717         } else {
1718             /* Coalesced; set the flag so the IPv4 checksum is recalculated */
1719             seg->is_coalesced = 1;
1720             return size;
1721         }
1722     }
1723 
1724     chain->stat.no_match_cache++;
1725     virtio_net_rsc_cache_buf(chain, nc, buf, size);
1726     return size;
1727 }
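
/*
 * Summary of the loop above, per cached segment:
 *   RSC_FINAL    - the cached segment is drained and the new packet is
 *                  delivered on its own (out-of-window, out-of-order,
 *                  over-size, or duplicate/pure ACK);
 *   RSC_NO_MATCH - different flow, keep scanning the chain;
 *   RSC_COALESCE - the payload was appended to the cached segment; nothing
 *                  is delivered until the drain timer fires or a later
 *                  packet forces a drain.
 */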
1728 
1729 /* Drain a connection's buffered data, to avoid out-of-order segments */
1730 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
1731                                         NetClientState *nc,
1732                                         const uint8_t *buf, size_t size,
1733                                         uint16_t ip_start, uint16_t ip_size,
1734                                         uint16_t tcp_port)
1735 {
1736     VirtioNetRscSeg *seg, *nseg;
1737     uint32_t ppair1, ppair2;
1738 
1739     ppair1 = *(uint32_t *)(buf + tcp_port); /* sport + dport as one word */
1740     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
1741         ppair2 = *(uint32_t *)(seg->buf + tcp_port);
1742         if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
1743             || (ppair1 != ppair2)) {
1744             continue;
1745         }
1746         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1747             chain->stat.drain_failed++;
1748         }
1749 
1750         break;
1751     }
1752 
1753     return virtio_net_do_receive(nc, buf, size);
1754 }
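
/*
 * The callers pass plain byte offsets into the frame.  For IPv4, for
 * instance, with hdr_len denoting the guest header length:
 *   ip_start = hdr_len + sizeof(struct eth_header) + 12 is the offset of
 *   ip_src, and ip_size = 8 spans ip_src plus ip_dst; tcp_port points at
 *   th_sport, so the 32-bit load above covers both ports at once.
 */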
1755 
1756 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
1757                                             struct ip_header *ip,
1758                                             const uint8_t *buf, size_t size)
1759 {
1760     uint16_t ip_len;
1761 
1762     /* Not an ipv4 packet */
1763     if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
1764         chain->stat.ip_option++;
1765         return RSC_BYPASS;
1766     }
1767 
1768     /* Don't handle packets with ip option */
1769     if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
1770         chain->stat.ip_option++;
1771         return RSC_BYPASS;
1772     }
1773 
1774     if (ip->ip_p != IPPROTO_TCP) {
1775         chain->stat.bypass_not_tcp++;
1776         return RSC_BYPASS;
1777     }
1778 
1779     /* Don't handle packets with ip fragment */
1780     if (!(htons(ip->ip_off) & IP_DF)) {
1781         chain->stat.ip_frag++;
1782         return RSC_BYPASS;
1783     }
1784 
1785     /* Don't handle packets with ecn flag */
1786     if (IPTOS_ECN(ip->ip_tos)) {
1787         chain->stat.ip_ecn++;
1788         return RSC_BYPASS;
1789     }
1790 
1791     ip_len = htons(ip->ip_len);
1792     if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
1793         || ip_len > (size - chain->n->guest_hdr_len -
1794                      sizeof(struct eth_header))) {
1795         chain->stat.ip_hacked++;
1796         return RSC_BYPASS;
1797     }
1798 
1799     return RSC_CANDIDATE;
1800 }
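
/*
 * Example of the ip_ver_len decoding above: 0x45 means version 4 with a
 * 5-word (20-byte) header, the option-less layout this code requires;
 * 0x46 would signal IP options and take the RSC_BYPASS path.
 */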
1801 
1802 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
1803                                       NetClientState *nc,
1804                                       const uint8_t *buf, size_t size)
1805 {
1806     int32_t ret;
1807     uint16_t hdr_len;
1808     VirtioNetRscUnit unit;
1809 
1810     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
1811 
1812     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
1813         + sizeof(struct tcp_header))) {
1814         chain->stat.bypass_not_tcp++;
1815         return virtio_net_do_receive(nc, buf, size);
1816     }
1817 
1818     virtio_net_rsc_extract_unit4(chain, buf, &unit);
1819     if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
1820         != RSC_CANDIDATE) {
1821         return virtio_net_do_receive(nc, buf, size);
1822     }
1823 
1824     ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
1825     if (ret == RSC_BYPASS) {
1826         return virtio_net_do_receive(nc, buf, size);
1827     } else if (ret == RSC_FINAL) {
1828         return virtio_net_rsc_drain_flow(chain, nc, buf, size,
1829                 ((hdr_len + sizeof(struct eth_header)) + 12),
1830                 VIRTIO_NET_IP4_ADDR_SIZE,
1831                 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
1832     }
1833 
1834     return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
1835 }
1836 
1837 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
1838                                             struct ip6_header *ip6,
1839                                             const uint8_t *buf, size_t size)
1840 {
1841     uint16_t ip_len;
1842     /* Check the version nibble: the top 4 bits of the big-endian flow field */
1843     if ((ntohl(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow) >> 28)
1844         != IP_HEADER_VERSION_6) {
1845         return RSC_BYPASS;
1846     }
1847 
1848     /* Options and protocol are both checked here: non-TCP next headers bypass */
1849     if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
1850         chain->stat.bypass_not_tcp++;
1851         return RSC_BYPASS;
1852     }
1853 
1854     ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
1855     if (ip_len < sizeof(struct tcp_header) ||
1856         ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
1857                   - sizeof(struct ip6_header))) {
1858         chain->stat.ip_hacked++;
1859         return RSC_BYPASS;
1860     }
1861 
1862     /* Don't handle packets with ecn flag */
1863     if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
1864         chain->stat.ip_ecn++;
1865         return RSC_BYPASS;
1866     }
1867 
1868     return RSC_CANDIDATE;
1869 }
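
/*
 * A worked check on the sanity test above: ip6_un1_flow holds the first
 * 32 bits of the IPv6 header in network order, so for a packet starting
 * 0x60 0x00 0x00 0x00, ntohl() yields 0x60000000 and >> 28 gives 6.
 */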
1870 
1871 static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
1872                                       const uint8_t *buf, size_t size)
1873 {
1874     int32_t ret;
1875     uint16_t hdr_len;
1876     VirtioNetRscChain *chain;
1877     VirtioNetRscUnit unit;
1878 
1879     chain = (VirtioNetRscChain *)opq;
1880     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
1881 
1882     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
1883         + sizeof(struct tcp_header))) {
1884         return virtio_net_do_receive(nc, buf, size);
1885     }
1886 
1887     virtio_net_rsc_extract_unit6(chain, buf, &unit);
1888     if (virtio_net_rsc_sanity_check6(chain, unit.ip, buf, size)
1889         != RSC_CANDIDATE) {
1890         return virtio_net_do_receive(nc, buf, size);
1891     }
1892 
1893     ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
1894     if (ret == RSC_BYPASS) {
1895         return virtio_net_do_receive(nc, buf, size);
1896     } else if (ret == RSC_FINAL) {
1897         return virtio_net_rsc_drain_flow(chain, nc, buf, size,
1898                 ((hdr_len + sizeof(struct eth_header)) + 8),
1899                 VIRTIO_NET_IP6_ADDR_SIZE,
1900                 hdr_len + sizeof(struct eth_header)
1901                 + sizeof(struct ip6_header));
1902     }
1903 
1904     return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
1905 }
1906 
1907 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
1908                                                       NetClientState *nc,
1909                                                       uint16_t proto)
1910 {
1911     VirtioNetRscChain *chain;
1912 
1913     if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
1914         return NULL;
1915     }
1916 
1917     QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
1918         if (chain->proto == proto) {
1919             return chain;
1920         }
1921     }
1922 
1923     chain = g_malloc(sizeof(*chain));
1924     chain->n = n;
1925     chain->proto = proto;
1926     if (proto == (uint16_t)ETH_P_IP) {
1927         chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
1928         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1929     } else {
1930         chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
1931         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1932     }
1933     chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
1934                                       virtio_net_rsc_purge, chain);
1935     memset(&chain->stat, 0, sizeof(chain->stat));
1936 
1937     QTAILQ_INIT(&chain->buffers);
1938     QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
1939 
1940     return chain;
1941 }
1942 
1943 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
1944                                       const uint8_t *buf,
1945                                       size_t size)
1946 {
1947     uint16_t proto;
1948     VirtioNetRscChain *chain;
1949     struct eth_header *eth;
1950     VirtIONet *n;
1951 
1952     n = qemu_get_nic_opaque(nc);
1953     if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
1954         return virtio_net_do_receive(nc, buf, size);
1955     }
1956 
1957     eth = (struct eth_header *)(buf + n->guest_hdr_len);
1958     proto = htons(eth->h_proto);
1959 
1960     chain = virtio_net_rsc_lookup_chain(n, nc, proto);
1961     if (chain) {
1962         chain->stat.received++;
1963         if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
1964             return virtio_net_rsc_receive4(chain, nc, buf, size);
1965         } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
1966             return virtio_net_rsc_receive6(chain, nc, buf, size);
1967         }
1968     }
1969     return virtio_net_do_receive(nc, buf, size);
1970 }
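
/*
 * h_proto is big-endian on the wire and htons() is its own inverse, so the
 * comparisons against ETH_P_IP (0x0800) and ETH_P_IPV6 (0x86DD) above work
 * on both little- and big-endian hosts.
 */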
1971 
1972 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1973                                   size_t size)
1974 {
1975     VirtIONet *n = qemu_get_nic_opaque(nc);
1976     if (n->rsc4_enabled || n->rsc6_enabled) {
1977         return virtio_net_rsc_receive(nc, buf, size);
1978     } else {
1979         return virtio_net_do_receive(nc, buf, size);
1980     }
1981 }
1982 
1983 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1984 
1985 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1986 {
1987     VirtIONet *n = qemu_get_nic_opaque(nc);
1988     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1989     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1990 
1991     virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1992     virtio_notify(vdev, q->tx_vq);
1993 
1994     g_free(q->async_tx.elem);
1995     q->async_tx.elem = NULL;
1996 
1997     virtio_queue_set_notification(q->tx_vq, 1);
1998     virtio_net_flush_tx(q);
1999 }
2000 
2001 /* TX */
2002 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
2003 {
2004     VirtIONet *n = q->n;
2005     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2006     VirtQueueElement *elem;
2007     int32_t num_packets = 0;
2008     int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
2009     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2010         return num_packets;
2011     }
2012 
2013     if (q->async_tx.elem) {
2014         virtio_queue_set_notification(q->tx_vq, 0);
2015         return num_packets;
2016     }
2017 
2018     for (;;) {
2019         ssize_t ret;
2020         unsigned int out_num;
2021         struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
2022         struct virtio_net_hdr_mrg_rxbuf mhdr;
2023 
2024         elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2025         if (!elem) {
2026             break;
2027         }
2028 
2029         out_num = elem->out_num;
2030         out_sg = elem->out_sg;
2031         if (out_num < 1) {
2032             virtio_error(vdev, "virtio-net header not in first element");
2033             virtqueue_detach_element(q->tx_vq, elem, 0);
2034             g_free(elem);
2035             return -EINVAL;
2036         }
2037 
2038         if (n->has_vnet_hdr) {
2039             if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
2040                 n->guest_hdr_len) {
2041                 virtio_error(vdev, "virtio-net header incorrect");
2042                 virtqueue_detach_element(q->tx_vq, elem, 0);
2043                 g_free(elem);
2044                 return -EINVAL;
2045             }
2046             if (n->needs_vnet_hdr_swap) {
2047                 virtio_net_hdr_swap(vdev, (void *) &mhdr);
2048                 sg2[0].iov_base = &mhdr;
2049                 sg2[0].iov_len = n->guest_hdr_len;
2050                 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
2051                                    out_sg, out_num,
2052                                    n->guest_hdr_len, -1);
2053                 if (out_num == VIRTQUEUE_MAX_SIZE) {
2054                     goto drop;
2055                 }
2056                 out_num += 1;
2057                 out_sg = sg2;
2058             }
2059         }
2060         /*
2061          * If host wants to see the guest header as is, we can
2062          * pass it on unchanged. Otherwise, copy just the parts
2063          * that host is interested in.
2064          */
2065         assert(n->host_hdr_len <= n->guest_hdr_len);
2066         if (n->host_hdr_len != n->guest_hdr_len) {
2067             unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2068                                        out_sg, out_num,
2069                                        0, n->host_hdr_len);
2070             sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2071                              out_sg, out_num,
2072                              n->guest_hdr_len, -1);
2073             out_num = sg_num;
2074             out_sg = sg;
2075         }
2076 
2077         ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2078                                       out_sg, out_num, virtio_net_tx_complete);
2079         if (ret == 0) {
2080             virtio_queue_set_notification(q->tx_vq, 0);
2081             q->async_tx.elem = elem;
2082             return -EBUSY;
2083         }
2084 
2085 drop:
2086         virtqueue_push(q->tx_vq, elem, 0);
2087         virtio_notify(vdev, q->tx_vq);
2088         g_free(elem);
2089 
2090         if (++num_packets >= n->tx_burst) {
2091             break;
2092         }
2093     }
2094     return num_packets;
2095 }
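
/*
 * Example of the header rewrite in virtio_net_flush_tx(): with
 * VIRTIO_NET_F_MRG_RXBUF the guest header is struct
 * virtio_net_hdr_mrg_rxbuf (12 bytes) while a tap backend consumes only
 * struct virtio_net_hdr (10 bytes).  The two iov_copy() calls build an
 * iovec view over bytes [0, 10) and [12, end), so the num_buffers field
 * is cut out without copying any packet data.
 */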
2096 
2097 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2098 {
2099     VirtIONet *n = VIRTIO_NET(vdev);
2100     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2101 
2102     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2103         virtio_net_drop_tx_queue_data(vdev, vq);
2104         return;
2105     }
2106 
2107     /* This happens when device was stopped but VCPU wasn't. */
2108     if (!vdev->vm_running) {
2109         q->tx_waiting = 1;
2110         return;
2111     }
2112 
2113     if (q->tx_waiting) {
2114         virtio_queue_set_notification(vq, 1);
2115         timer_del(q->tx_timer);
2116         q->tx_waiting = 0;
2117         if (virtio_net_flush_tx(q) == -EINVAL) {
2118             return;
2119         }
2120     } else {
2121         timer_mod(q->tx_timer,
2122                        qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2123         q->tx_waiting = 1;
2124         virtio_queue_set_notification(vq, 0);
2125     }
2126 }
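
/*
 * tx=timer batching, in short: the first guest kick arms tx_timer and
 * turns notifications off; a second kick before expiry cancels the timer
 * and flushes immediately, otherwise virtio_net_tx_timer() flushes once
 * n->tx_timeout nanoseconds have elapsed.
 */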
2127 
2128 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2129 {
2130     VirtIONet *n = VIRTIO_NET(vdev);
2131     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2132 
2133     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2134         virtio_net_drop_tx_queue_data(vdev, vq);
2135         return;
2136     }
2137 
2138     if (unlikely(q->tx_waiting)) {
2139         return;
2140     }
2141     q->tx_waiting = 1;
2142     /* This happens when device was stopped but VCPU wasn't. */
2143     if (!vdev->vm_running) {
2144         return;
2145     }
2146     virtio_queue_set_notification(vq, 0);
2147     qemu_bh_schedule(q->tx_bh);
2148 }
2149 
2150 static void virtio_net_tx_timer(void *opaque)
2151 {
2152     VirtIONetQueue *q = opaque;
2153     VirtIONet *n = q->n;
2154     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2155     /* This happens when device was stopped but BH wasn't. */
2156     if (!vdev->vm_running) {
2157         /* Make sure tx waiting is set, so we'll run when restarted. */
2158         assert(q->tx_waiting);
2159         return;
2160     }
2161 
2162     q->tx_waiting = 0;
2163 
2164     /* Just in case the driver is not ready any more */
2165     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2166         return;
2167     }
2168 
2169     virtio_queue_set_notification(q->tx_vq, 1);
2170     virtio_net_flush_tx(q);
2171 }
2172 
2173 static void virtio_net_tx_bh(void *opaque)
2174 {
2175     VirtIONetQueue *q = opaque;
2176     VirtIONet *n = q->n;
2177     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2178     int32_t ret;
2179 
2180     /* This happens when device was stopped but BH wasn't. */
2181     if (!vdev->vm_running) {
2182         /* Make sure tx waiting is set, so we'll run when restarted. */
2183         assert(q->tx_waiting);
2184         return;
2185     }
2186 
2187     q->tx_waiting = 0;
2188 
2189     /* Just in case the driver is not ready any more */
2190     if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
2191         return;
2192     }
2193 
2194     ret = virtio_net_flush_tx(q);
2195     if (ret == -EBUSY || ret == -EINVAL) {
2196         return; /* Notification re-enable handled by tx_complete or device
2197                  * broken */
2198     }
2199 
2200     /* If we flush a full burst of packets, assume there are
2201      * more coming and immediately reschedule */
2202     if (ret >= n->tx_burst) {
2203         qemu_bh_schedule(q->tx_bh);
2204         q->tx_waiting = 1;
2205         return;
2206     }
2207 
2208     /* If less than a full burst, re-enable notification and flush
2209      * anything that may have come in while we weren't looking.  If
2210      * we find something, assume the guest is still active and reschedule */
2211     virtio_queue_set_notification(q->tx_vq, 1);
2212     ret = virtio_net_flush_tx(q);
2213     if (ret == -EINVAL) {
2214         return;
2215     } else if (ret > 0) {
2216         virtio_queue_set_notification(q->tx_vq, 0);
2217         qemu_bh_schedule(q->tx_bh);
2218         q->tx_waiting = 1;
2219     }
2220 }
2221 
2222 static void virtio_net_add_queue(VirtIONet *n, int index)
2223 {
2224     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2225 
2226     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2227                                            virtio_net_handle_rx);
2228 
2229     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2230         n->vqs[index].tx_vq =
2231             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2232                              virtio_net_handle_tx_timer);
2233         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2234                                               virtio_net_tx_timer,
2235                                               &n->vqs[index]);
2236     } else {
2237         n->vqs[index].tx_vq =
2238             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2239                              virtio_net_handle_tx_bh);
2240         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
2241     }
2242 
2243     n->vqs[index].tx_waiting = 0;
2244     n->vqs[index].n = n;
2245 }
2246 
2247 static void virtio_net_del_queue(VirtIONet *n, int index)
2248 {
2249     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2250     VirtIONetQueue *q = &n->vqs[index];
2251     NetClientState *nc = qemu_get_subqueue(n->nic, index);
2252 
2253     qemu_purge_queued_packets(nc);
2254 
2255     virtio_del_queue(vdev, index * 2);
2256     if (q->tx_timer) {
2257         timer_del(q->tx_timer);
2258         timer_free(q->tx_timer);
2259         q->tx_timer = NULL;
2260     } else {
2261         qemu_bh_delete(q->tx_bh);
2262         q->tx_bh = NULL;
2263     }
2264     q->tx_waiting = 0;
2265     virtio_del_queue(vdev, index * 2 + 1);
2266 }
2267 
2268 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
2269 {
2270     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2271     int old_num_queues = virtio_get_num_queues(vdev);
2272     int new_num_queues = new_max_queues * 2 + 1;
2273     int i;
2274 
2275     assert(old_num_queues >= 3);
2276     assert(old_num_queues % 2 == 1);
2277 
2278     if (old_num_queues == new_num_queues) {
2279         return;
2280     }
2281 
2282     /*
2283      * We always need to remove and add ctrl vq if
2284      * old_num_queues != new_num_queues. Remove ctrl_vq first,
2285      * and then we only enter one of the following two loops.
2286      */
2287     virtio_del_queue(vdev, old_num_queues - 1);
2288 
2289     for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
2290         /* new_num_queues < old_num_queues */
2291         virtio_net_del_queue(n, i / 2);
2292     }
2293 
2294     for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
2295         /* new_num_queues > old_num_queues */
2296         virtio_net_add_queue(n, i / 2);
2297     }
2298 
2299     /* add ctrl_vq last */
2300     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2301 }
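
/*
 * Worked example for the resize above: growing from 2 to 4 queue pairs
 * means old_num_queues = 5 (rx0, tx0, rx1, tx1, ctrl) and
 * new_num_queues = 9.  After the ctrl vq (index 4) is deleted, the second
 * loop runs with i = 4, 6 and adds pairs i / 2 = 2 and 3; the ctrl vq is
 * then re-added at index 8.  Shrinking walks the first loop symmetrically.
 */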
2302 
2303 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
2304 {
2305     int max = multiqueue ? n->max_queues : 1;
2306 
2307     n->multiqueue = multiqueue;
2308     virtio_net_change_num_queues(n, max);
2309 
2310     virtio_net_set_queues(n);
2311 }
2312 
2313 static int virtio_net_post_load_device(void *opaque, int version_id)
2314 {
2315     VirtIONet *n = opaque;
2316     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2317     int i, link_down;
2318 
2319     trace_virtio_net_post_load_device();
2320     virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
2321                                virtio_vdev_has_feature(vdev,
2322                                                        VIRTIO_F_VERSION_1));
2323 
2324     /* MAC_TABLE_ENTRIES may be different from the saved image */
2325     if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
2326         n->mac_table.in_use = 0;
2327     }
2328 
2329     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
2330         n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
2331     }
2332 
2333     if (peer_has_vnet_hdr(n)) {
2334         virtio_net_apply_guest_offloads(n);
2335     }
2336 
2337     virtio_net_set_queues(n);
2338 
2339     /* Find the first multicast entry in the saved MAC filter */
2340     for (i = 0; i < n->mac_table.in_use; i++) {
2341         if (n->mac_table.macs[i * ETH_ALEN] & 1) {
2342             break;
2343         }
2344     }
2345     n->mac_table.first_multi = i;
2346 
2347     /* nc.link_down can't be migrated, so infer link_down from the
2348      * link status bit in n->status */
2349     link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
2350     for (i = 0; i < n->max_queues; i++) {
2351         qemu_get_subqueue(n->nic, i)->link_down = link_down;
2352     }
2353 
2354     if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
2355         virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
2356         qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2357                                   QEMU_CLOCK_VIRTUAL,
2358                                   virtio_net_announce_timer, n);
2359         if (n->announce_timer.round) {
2360             timer_mod(n->announce_timer.tm,
2361                       qemu_clock_get_ms(n->announce_timer.type));
2362         } else {
2363             qemu_announce_timer_del(&n->announce_timer);
2364         }
2365     }
2366 
2367     return 0;
2368 }
2369 
2370 /* tx_waiting field of a VirtIONetQueue */
2371 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
2372     .name = "virtio-net-queue-tx_waiting",
2373     .fields = (VMStateField[]) {
2374         VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
2375         VMSTATE_END_OF_LIST()
2376     },
2377 };
2378 
2379 static bool max_queues_gt_1(void *opaque, int version_id)
2380 {
2381     return VIRTIO_NET(opaque)->max_queues > 1;
2382 }
2383 
2384 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
2385 {
2386     return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
2387                                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
2388 }
2389 
2390 static bool mac_table_fits(void *opaque, int version_id)
2391 {
2392     return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
2393 }
2394 
2395 static bool mac_table_doesnt_fit(void *opaque, int version_id)
2396 {
2397     return !mac_table_fits(opaque, version_id);
2398 }
2399 
2400 /* This temporary type is shared by all the WITH_TMP methods
2401  * although only some fields are used by each.
2402  */
2403 struct VirtIONetMigTmp {
2404     VirtIONet      *parent;
2405     VirtIONetQueue *vqs_1;
2406     uint16_t        curr_queues_1;
2407     uint8_t         has_ufo;
2408     uint32_t        has_vnet_hdr;
2409 };
2410 
2411 /* The 2nd and subsequent tx_waiting flags are loaded later than
2412  * the 1st entry in the queues and only if there's more than one
2413  * entry.  We use the tmp mechanism to calculate a temporary
2414  * pointer and count and also validate the count.
2415  */
2416 
2417 static int virtio_net_tx_waiting_pre_save(void *opaque)
2418 {
2419     struct VirtIONetMigTmp *tmp = opaque;
2420 
2421     tmp->vqs_1 = tmp->parent->vqs + 1;
2422     tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
2423     if (tmp->parent->curr_queues == 0) {
2424         tmp->curr_queues_1 = 0;
2425     }
2426 
2427     return 0;
2428 }
2429 
2430 static int virtio_net_tx_waiting_pre_load(void *opaque)
2431 {
2432     struct VirtIONetMigTmp *tmp = opaque;
2433 
2434     /* Reuse the pointer setup from save */
2435     virtio_net_tx_waiting_pre_save(opaque);
2436 
2437     if (tmp->parent->curr_queues > tmp->parent->max_queues) {
2438         error_report("virtio-net: curr_queues %x > max_queues %x",
2439             tmp->parent->curr_queues, tmp->parent->max_queues);
2440 
2441         return -EINVAL;
2442     }
2443 
2444     return 0; /* all good */
2445 }
2446 
2447 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
2448     .name      = "virtio-net-tx_waiting",
2449     .pre_load  = virtio_net_tx_waiting_pre_load,
2450     .pre_save  = virtio_net_tx_waiting_pre_save,
2451     .fields    = (VMStateField[]) {
2452         VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
2453                                      curr_queues_1,
2454                                      vmstate_virtio_net_queue_tx_waiting,
2455                                      struct VirtIONetQueue),
2456         VMSTATE_END_OF_LIST()
2457     },
2458 };
2459 
2460 /* the 'has_ufo' flag is just tested; if the incoming stream has the
2461  * flag set we need to check that we have it
2462  */
2463 static int virtio_net_ufo_post_load(void *opaque, int version_id)
2464 {
2465     struct VirtIONetMigTmp *tmp = opaque;
2466 
2467     if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
2468         error_report("virtio-net: saved image requires TUN_F_UFO support");
2469         return -EINVAL;
2470     }
2471 
2472     return 0;
2473 }
2474 
2475 static int virtio_net_ufo_pre_save(void *opaque)
2476 {
2477     struct VirtIONetMigTmp *tmp = opaque;
2478 
2479     tmp->has_ufo = tmp->parent->has_ufo;
2480 
2481     return 0;
2482 }
2483 
2484 static const VMStateDescription vmstate_virtio_net_has_ufo = {
2485     .name      = "virtio-net-ufo",
2486     .post_load = virtio_net_ufo_post_load,
2487     .pre_save  = virtio_net_ufo_pre_save,
2488     .fields    = (VMStateField[]) {
2489         VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
2490         VMSTATE_END_OF_LIST()
2491     },
2492 };
2493 
2494 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
2495  * flag set we need to check that we have it
2496  */
2497 static int virtio_net_vnet_post_load(void *opaque, int version_id)
2498 {
2499     struct VirtIONetMigTmp *tmp = opaque;
2500 
2501     if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
2502         error_report("virtio-net: saved image requires vnet_hdr=on");
2503         return -EINVAL;
2504     }
2505 
2506     return 0;
2507 }
2508 
2509 static int virtio_net_vnet_pre_save(void *opaque)
2510 {
2511     struct VirtIONetMigTmp *tmp = opaque;
2512 
2513     tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
2514 
2515     return 0;
2516 }
2517 
2518 static const VMStateDescription vmstate_virtio_net_has_vnet = {
2519     .name      = "virtio-net-vnet",
2520     .post_load = virtio_net_vnet_post_load,
2521     .pre_save  = virtio_net_vnet_pre_save,
2522     .fields    = (VMStateField[]) {
2523         VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
2524         VMSTATE_END_OF_LIST()
2525     },
2526 };
2527 
2528 static const VMStateDescription vmstate_virtio_net_device = {
2529     .name = "virtio-net-device",
2530     .version_id = VIRTIO_NET_VM_VERSION,
2531     .minimum_version_id = VIRTIO_NET_VM_VERSION,
2532     .post_load = virtio_net_post_load_device,
2533     .fields = (VMStateField[]) {
2534         VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
2535         VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
2536                                vmstate_virtio_net_queue_tx_waiting,
2537                                VirtIONetQueue),
2538         VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
2539         VMSTATE_UINT16(status, VirtIONet),
2540         VMSTATE_UINT8(promisc, VirtIONet),
2541         VMSTATE_UINT8(allmulti, VirtIONet),
2542         VMSTATE_UINT32(mac_table.in_use, VirtIONet),
2543 
2544         /* Guarded pair: if it fits we load it, else we throw it away
2545          * - this can happen if the source has a larger MAC table; post-load
2546          *   sets flags in this case.
2547          */
2548         VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
2549                                 0, mac_table_fits, mac_table.in_use,
2550                                  ETH_ALEN),
2551         VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
2552                                      mac_table.in_use, ETH_ALEN),
2553 
2554         /* Note: This is an array of uint32's that's always been saved as a
2555          * buffer; hold onto your endiannesses; it's actually used as a bitmap
2556          * but based on the uint.
2557          */
2558         VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
2559         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2560                          vmstate_virtio_net_has_vnet),
2561         VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
2562         VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
2563         VMSTATE_UINT8(alluni, VirtIONet),
2564         VMSTATE_UINT8(nomulti, VirtIONet),
2565         VMSTATE_UINT8(nouni, VirtIONet),
2566         VMSTATE_UINT8(nobcast, VirtIONet),
2567         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2568                          vmstate_virtio_net_has_ufo),
2569         VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
2570                             vmstate_info_uint16_equal, uint16_t),
2571         VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
2572         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2573                          vmstate_virtio_net_tx_waiting),
2574         VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
2575                             has_ctrl_guest_offloads),
2576         VMSTATE_END_OF_LIST()
2577     },
2578 };
2579 
2580 static NetClientInfo net_virtio_info = {
2581     .type = NET_CLIENT_DRIVER_NIC,
2582     .size = sizeof(NICState),
2583     .can_receive = virtio_net_can_receive,
2584     .receive = virtio_net_receive,
2585     .link_status_changed = virtio_net_set_link_status,
2586     .query_rx_filter = virtio_net_query_rxfilter,
2587     .announce = virtio_net_announce,
2588 };
2589 
2590 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
2591 {
2592     VirtIONet *n = VIRTIO_NET(vdev);
2593     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
2594     assert(n->vhost_started);
2595     return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
2596 }
2597 
2598 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
2599                                            bool mask)
2600 {
2601     VirtIONet *n = VIRTIO_NET(vdev);
2602     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
2603     assert(n->vhost_started);
2604     vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
2605                              vdev, idx, mask);
2606 }
2607 
2608 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
2609 {
2610     virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
2611 
2612     n->config_size = virtio_feature_get_config_size(feature_sizes,
2613                                                     host_features);
2614 }
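
/*
 * virtio_feature_get_config_size() returns the largest .end offset among
 * the feature_sizes entries whose feature bit is set.  For example, with
 * only VIRTIO_NET_F_MAC (always added above) the config space ends right
 * after 'mac'; negotiating VIRTIO_NET_F_MTU as well extends it through
 * 'mtu'.
 */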
2615 
2616 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
2617                                    const char *type)
2618 {
2619     /*
2620      * The name can be NULL; the netclient name will then be of the form type.x.
2621      */
2622     assert(type != NULL);
2623 
2624     g_free(n->netclient_name);
2625     g_free(n->netclient_type);
2626     n->netclient_name = g_strdup(name);
2627     n->netclient_type = g_strdup(type);
2628 }
2629 
2630 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
2631 {
2632     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2633     VirtIONet *n = VIRTIO_NET(dev);
2634     NetClientState *nc;
2635     int i;
2636 
2637     if (n->net_conf.mtu) {
2638         n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
2639     }
2640 
2641     if (n->net_conf.duplex_str) {
2642         if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
2643             n->net_conf.duplex = DUPLEX_HALF;
2644         } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
2645             n->net_conf.duplex = DUPLEX_FULL;
2646         } else {
2647             error_setg(errp, "'duplex' must be 'half' or 'full'");
                 return;
2648         }
2649         n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
2650     } else {
2651         n->net_conf.duplex = DUPLEX_UNKNOWN;
2652     }
2653 
2654     if (n->net_conf.speed < SPEED_UNKNOWN) {
2655         error_setg(errp, "'speed' must be between 0 and INT_MAX");
             return;
2656     } else if (n->net_conf.speed >= 0) {
2657         n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
2658     }
2659 
2660     virtio_net_set_config_size(n, n->host_features);
2661     virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
2662 
2663     /*
2664      * We set a lower limit on RX queue size to what it always was.
2665      * Guests that want a smaller ring can always resize it without
2666      * help from us (using virtio 1 and up).
2667      */
2668     if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
2669         n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
2670         !is_power_of_2(n->net_conf.rx_queue_size)) {
2671         error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
2672                    "must be a power of 2 between %d and %d.",
2673                    n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
2674                    VIRTQUEUE_MAX_SIZE);
2675         virtio_cleanup(vdev);
2676         return;
2677     }
2678 
2679     if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
2680         n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
2681         !is_power_of_2(n->net_conf.tx_queue_size)) {
2682         error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
2683                    "must be a power of 2 between %d and %d",
2684                    n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
2685                    VIRTQUEUE_MAX_SIZE);
2686         virtio_cleanup(vdev);
2687         return;
2688     }
2689 
2690     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
2691     if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
2692         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
2693                    "must be a positive integer less than %d.",
2694                    n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
2695         virtio_cleanup(vdev);
2696         return;
2697     }
2698     n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
2699     n->curr_queues = 1;
2700     n->tx_timeout = n->net_conf.txtimer;
2701 
2702     if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
2703                        && strcmp(n->net_conf.tx, "bh")) {
2704         warn_report("virtio-net: "
2705                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
2706                     n->net_conf.tx);
2707         error_printf("Defaulting to \"bh\"\n");
2708     }
2709 
2710     n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
2711                                     n->net_conf.tx_queue_size);
2712 
2713     for (i = 0; i < n->max_queues; i++) {
2714         virtio_net_add_queue(n, i);
2715     }
2716 
2717     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2718     qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
2719     memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
2720     n->status = VIRTIO_NET_S_LINK_UP;
2721     qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2722                               QEMU_CLOCK_VIRTUAL,
2723                               virtio_net_announce_timer, n);
2724     n->announce_timer.round = 0;
2725 
2726     if (n->netclient_type) {
2727         /*
2728          * Happens when virtio_net_set_netclient_name has been called.
2729          */
2730         n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2731                               n->netclient_type, n->netclient_name, n);
2732     } else {
2733         n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2734                               object_get_typename(OBJECT(dev)), dev->id, n);
2735     }
2736 
2737     peer_test_vnet_hdr(n);
2738     if (peer_has_vnet_hdr(n)) {
2739         for (i = 0; i < n->max_queues; i++) {
2740             qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
2741         }
2742         n->host_hdr_len = sizeof(struct virtio_net_hdr);
2743     } else {
2744         n->host_hdr_len = 0;
2745     }
2746 
2747     qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
2748 
2749     n->vqs[0].tx_waiting = 0;
2750     n->tx_burst = n->net_conf.txburst;
2751     virtio_net_set_mrg_rx_bufs(n, 0, 0);
2752     n->promisc = 1; /* for compatibility */
2753 
2754     n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
2755 
2756     n->vlans = g_malloc0(MAX_VLAN >> 3);
2757 
2758     nc = qemu_get_queue(n->nic);
2759     nc->rxfilter_notify_enabled = 1;
2760 
2761     QTAILQ_INIT(&n->rsc_chains);
2762     n->qdev = dev;
2763 }
2764 
2765 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
2766 {
2767     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2768     VirtIONet *n = VIRTIO_NET(dev);
2769     int i, max_queues;
2770 
2771     /* This will stop vhost backend if appropriate. */
2772     virtio_net_set_status(vdev, 0);
2773 
2774     g_free(n->netclient_name);
2775     n->netclient_name = NULL;
2776     g_free(n->netclient_type);
2777     n->netclient_type = NULL;
2778 
2779     g_free(n->mac_table.macs);
2780     g_free(n->vlans);
2781 
2782     max_queues = n->multiqueue ? n->max_queues : 1;
2783     for (i = 0; i < max_queues; i++) {
2784         virtio_net_del_queue(n, i);
2785     }
2786 
2787     qemu_announce_timer_del(&n->announce_timer);
2788     g_free(n->vqs);
2789     qemu_del_nic(n->nic);
2790     virtio_net_rsc_cleanup(n);
2791     virtio_cleanup(vdev);
2792 }
2793 
2794 static void virtio_net_instance_init(Object *obj)
2795 {
2796     VirtIONet *n = VIRTIO_NET(obj);
2797 
2798     /*
2799      * The default config_size is sizeof(struct virtio_net_config).
2800      * Can be overridden with virtio_net_set_config_size.
2801      */
2802     n->config_size = sizeof(struct virtio_net_config);
2803     device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2804                                   "bootindex", "/ethernet-phy@0",
2805                                   DEVICE(n), NULL);
2806 }
2807 
2808 static int virtio_net_pre_save(void *opaque)
2809 {
2810     VirtIONet *n = opaque;
2811 
2812     /* At this point, the backend must be stopped, otherwise
2813      * it might keep writing to memory. */
2814     assert(!n->vhost_started);
2815 
2816     return 0;
2817 }
2818 
2819 static const VMStateDescription vmstate_virtio_net = {
2820     .name = "virtio-net",
2821     .minimum_version_id = VIRTIO_NET_VM_VERSION,
2822     .version_id = VIRTIO_NET_VM_VERSION,
2823     .fields = (VMStateField[]) {
2824         VMSTATE_VIRTIO_DEVICE,
2825         VMSTATE_END_OF_LIST()
2826     },
2827     .pre_save = virtio_net_pre_save,
2828 };
2829 
2830 static Property virtio_net_properties[] = {
2831     DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
2832                     VIRTIO_NET_F_CSUM, true),
2833     DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
2834                     VIRTIO_NET_F_GUEST_CSUM, true),
2835     DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2836     DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
2837                     VIRTIO_NET_F_GUEST_TSO4, true),
2838     DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
2839                     VIRTIO_NET_F_GUEST_TSO6, true),
2840     DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
2841                     VIRTIO_NET_F_GUEST_ECN, true),
2842     DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
2843                     VIRTIO_NET_F_GUEST_UFO, true),
2844     DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
2845                     VIRTIO_NET_F_GUEST_ANNOUNCE, true),
2846     DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
2847                     VIRTIO_NET_F_HOST_TSO4, true),
2848     DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
2849                     VIRTIO_NET_F_HOST_TSO6, true),
2850     DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
2851                     VIRTIO_NET_F_HOST_ECN, true),
2852     DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
2853                     VIRTIO_NET_F_HOST_UFO, true),
2854     DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
2855                     VIRTIO_NET_F_MRG_RXBUF, true),
2856     DEFINE_PROP_BIT64("status", VirtIONet, host_features,
2857                     VIRTIO_NET_F_STATUS, true),
2858     DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
2859                     VIRTIO_NET_F_CTRL_VQ, true),
2860     DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
2861                     VIRTIO_NET_F_CTRL_RX, true),
2862     DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
2863                     VIRTIO_NET_F_CTRL_VLAN, true),
2864     DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
2865                     VIRTIO_NET_F_CTRL_RX_EXTRA, true),
2866     DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
2867                     VIRTIO_NET_F_CTRL_MAC_ADDR, true),
2868     DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
2869                     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
2870     DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2871     DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
2872                     VIRTIO_NET_F_RSC_EXT, false),
2873     DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
2874                        VIRTIO_NET_RSC_DEFAULT_INTERVAL),
2875     DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2876     DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
2877                        TX_TIMER_INTERVAL),
2878     DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2879     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
2880     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2881                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
2882     DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
2883                        VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
2884     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
2885     DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
2886                      true),
2887     DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
2888     DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
2889     DEFINE_PROP_END_OF_LIST(),
2890 };
2891 
2892 static void virtio_net_class_init(ObjectClass *klass, void *data)
2893 {
2894     DeviceClass *dc = DEVICE_CLASS(klass);
2895     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2896 
2897     dc->props = virtio_net_properties;
2898     dc->vmsd = &vmstate_virtio_net;
2899     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2900     vdc->realize = virtio_net_device_realize;
2901     vdc->unrealize = virtio_net_device_unrealize;
2902     vdc->get_config = virtio_net_get_config;
2903     vdc->set_config = virtio_net_set_config;
2904     vdc->get_features = virtio_net_get_features;
2905     vdc->set_features = virtio_net_set_features;
2906     vdc->bad_features = virtio_net_bad_features;
2907     vdc->reset = virtio_net_reset;
2908     vdc->set_status = virtio_net_set_status;
2909     vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2910     vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2911     vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
2912     vdc->vmsd = &vmstate_virtio_net_device;
2913 }
2914 
2915 static const TypeInfo virtio_net_info = {
2916     .name = TYPE_VIRTIO_NET,
2917     .parent = TYPE_VIRTIO_DEVICE,
2918     .instance_size = sizeof(VirtIONet),
2919     .instance_init = virtio_net_instance_init,
2920     .class_init = virtio_net_class_init,
2921 };
2922 
2923 static void virtio_register_types(void)
2924 {
2925     type_register_static(&virtio_net_info);
2926 }
2927 
2928 type_init(virtio_register_types)
2929