xref: /openbmc/qemu/hw/net/virtio-net.c (revision 709395f8)
1 /*
2  * Virtio Network Device
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/iov.h"
16 #include "hw/virtio/virtio.h"
17 #include "net/net.h"
18 #include "net/checksum.h"
19 #include "net/tap.h"
20 #include "qemu/error-report.h"
21 #include "qemu/timer.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "net/vhost_net.h"
24 #include "net/announce.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "qapi/error.h"
27 #include "qapi/qapi-events-net.h"
28 #include "hw/virtio/virtio-access.h"
29 #include "migration/misc.h"
30 #include "standard-headers/linux/ethtool.h"
31 #include "trace.h"
32 
33 #define VIRTIO_NET_VM_VERSION    11
34 
35 #define MAC_TABLE_ENTRIES    64
36 #define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
37 
38 /* previously fixed value */
39 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
40 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
41 
42 /* for now, only allow larger queues; with virtio-1, guest can downsize */
43 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
44 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
45 
46 #define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */
47 
48 #define VIRTIO_NET_TCP_FLAG         0x3F
49 #define VIRTIO_NET_TCP_HDR_LENGTH   0xF000
50 
51 /* IPv4 max payload, 16 bits in the header */
52 #define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
53 #define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
54 
55 /* header length field value for an IP header without options */
56 #define VIRTIO_NET_IP4_HEADER_LENGTH 5
57 
58 #define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
59 #define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
60 
61 /* Purge coalesced packets timer interval. This value affects performance
62    significantly and should be tuned carefully; '300000' (300us) is the
63    recommended value to pass the WHQL test, while '50000' can gain 2x netperf
64    throughput with tso/gso/gro 'off'. */
65 #define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
66 
67 /* temporary until the standard header includes it */
68 #if !defined(VIRTIO_NET_HDR_F_RSC_INFO)
69 
70 #define VIRTIO_NET_HDR_F_RSC_INFO  4 /* rsc_ext data in csum_ fields */
71 #define VIRTIO_NET_F_RSC_EXT       61
72 
73 static inline __virtio16 *virtio_net_rsc_ext_num_packets(
74     struct virtio_net_hdr *hdr)
75 {
76     return &hdr->csum_start;
77 }
78 
79 static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
80     struct virtio_net_hdr *hdr)
81 {
82     return &hdr->csum_offset;
83 }
84 
85 #endif
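
/*
 * Note on the accessors above: a coalesced packet no longer needs
 * csum_start/csum_offset, so the RSC extension repurposes those two
 * header fields to report how many packets and duplicate ACKs were
 * folded into the segment.
 */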
86 
87 static VirtIOFeature feature_sizes[] = {
88     {.flags = 1ULL << VIRTIO_NET_F_MAC,
89      .end = virtio_endof(struct virtio_net_config, mac)},
90     {.flags = 1ULL << VIRTIO_NET_F_STATUS,
91      .end = virtio_endof(struct virtio_net_config, status)},
92     {.flags = 1ULL << VIRTIO_NET_F_MQ,
93      .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)},
94     {.flags = 1ULL << VIRTIO_NET_F_MTU,
95      .end = virtio_endof(struct virtio_net_config, mtu)},
96     {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
97      .end = virtio_endof(struct virtio_net_config, duplex)},
98     {}
99 };
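
/*
 * The table above lets the device expose only as much config space as the
 * negotiated features require.  A minimal sketch of how such a table can
 * be walked (hypothetical helper mirroring what QEMU's
 * virtio_feature_get_config_size() does; not part of this file):
 */
#if 0
static size_t config_size_for_features(const VirtIOFeature *sizes,
                                       uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    /* Take the largest 'end' offset among the enabled feature blocks. */
    for (i = 0; sizes[i].flags != 0; i++) {
        if (host_features & sizes[i].flags) {
            config_size = MAX(sizes[i].end, config_size);
        }
    }
    return config_size;
}
#endif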
100 
101 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
102 {
103     VirtIONet *n = qemu_get_nic_opaque(nc);
104 
105     return &n->vqs[nc->queue_index];
106 }
107 
108 static int vq2q(int queue_index)
109 {
110     return queue_index / 2;
111 }
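
/*
 * virtio-net lays its virtqueues out as RX/TX pairs (followed by the
 * control queue), so vq2q() above maps virtqueue index 0 or 1 to queue
 * pair 0, index 2 or 3 to pair 1, and so on.  A minimal sketch of the
 * inverse mapping (hypothetical helpers, not part of this file):
 */
#if 0
static int q2rx_vq(int queue_pair) { return queue_pair * 2; }     /* RX vq */
static int q2tx_vq(int queue_pair) { return queue_pair * 2 + 1; } /* TX vq */
#endif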
112 
113 /* TODO
114  * - we could suppress RX interrupt if we were so inclined.
115  */
116 
117 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
118 {
119     VirtIONet *n = VIRTIO_NET(vdev);
120     struct virtio_net_config netcfg;
121 
122     virtio_stw_p(vdev, &netcfg.status, n->status);
123     virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
124     virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
125     memcpy(netcfg.mac, n->mac, ETH_ALEN);
126     virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
127     netcfg.duplex = n->net_conf.duplex;
128     memcpy(config, &netcfg, n->config_size);
129 }
130 
131 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
132 {
133     VirtIONet *n = VIRTIO_NET(vdev);
134     struct virtio_net_config netcfg = {};
135 
136     memcpy(&netcfg, config, n->config_size);
137 
138     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
139         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
140         memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
141         memcpy(n->mac, netcfg.mac, ETH_ALEN);
142         qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
143     }
144 }
145 
146 static bool virtio_net_started(VirtIONet *n, uint8_t status)
147 {
148     VirtIODevice *vdev = VIRTIO_DEVICE(n);
149     return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
150         (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
151 }
152 
153 static void virtio_net_announce_notify(VirtIONet *net)
154 {
155     VirtIODevice *vdev = VIRTIO_DEVICE(net);
156     trace_virtio_net_announce_notify();
157 
158     net->status |= VIRTIO_NET_S_ANNOUNCE;
159     virtio_notify_config(vdev);
160 }
161 
162 static void virtio_net_announce_timer(void *opaque)
163 {
164     VirtIONet *n = opaque;
165     trace_virtio_net_announce_timer(n->announce_timer.round);
166 
167     n->announce_timer.round--;
168     virtio_net_announce_notify(n);
169 }
170 
171 static void virtio_net_announce(NetClientState *nc)
172 {
173     VirtIONet *n = qemu_get_nic_opaque(nc);
174     VirtIODevice *vdev = VIRTIO_DEVICE(n);
175 
176     /*
177      * Make sure the virtio migration announcement timer isn't running.
178      * If it is, let it trigger the announcement itself so that we do not
179      * cause confusion.
180      */
181     if (n->announce_timer.round) {
182         return;
183     }
184 
185     if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
186         virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
187             virtio_net_announce_notify(n);
188     }
189 }
190 
191 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
192 {
193     VirtIODevice *vdev = VIRTIO_DEVICE(n);
194     NetClientState *nc = qemu_get_queue(n->nic);
195     int queues = n->multiqueue ? n->max_queues : 1;
196 
197     if (!get_vhost_net(nc->peer)) {
198         return;
199     }
200 
201     if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
202         !!n->vhost_started) {
203         return;
204     }
205     if (!n->vhost_started) {
206         int r, i;
207 
208         if (n->needs_vnet_hdr_swap) {
209             error_report("backend does not support %s vnet headers; "
210                          "falling back on userspace virtio",
211                          virtio_is_big_endian(vdev) ? "BE" : "LE");
212             return;
213         }
214 
215         /* Any packets outstanding? Purge them to avoid touching rings
216          * when vhost is running.
217          */
218         for (i = 0;  i < queues; i++) {
219             NetClientState *qnc = qemu_get_subqueue(n->nic, i);
220 
221             /* Purge both directions: TX and RX. */
222             qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
223             qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
224         }
225 
226         if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
227             r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
228             if (r < 0) {
229                 error_report("%uBytes MTU not supported by the backend",
230                              n->net_conf.mtu);
231 
232                 return;
233             }
234         }
235 
236         n->vhost_started = 1;
237         r = vhost_net_start(vdev, n->nic->ncs, queues);
238         if (r < 0) {
239             error_report("unable to start vhost net: %d: "
240                          "falling back on userspace virtio", -r);
241             n->vhost_started = 0;
242         }
243     } else {
244         vhost_net_stop(vdev, n->nic->ncs, queues);
245         n->vhost_started = 0;
246     }
247 }
248 
249 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
250                                           NetClientState *peer,
251                                           bool enable)
252 {
253     if (virtio_is_big_endian(vdev)) {
254         return qemu_set_vnet_be(peer, enable);
255     } else {
256         return qemu_set_vnet_le(peer, enable);
257     }
258 }
259 
260 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
261                                        int queues, bool enable)
262 {
263     int i;
264 
265     for (i = 0; i < queues; i++) {
266         if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
267             enable) {
268             while (--i >= 0) {
269                 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
270             }
271 
272             return true;
273         }
274     }
275 
276     return false;
277 }
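
/*
 * Note the return-value convention: virtio_net_set_vnet_endian() returns
 * true only when enabling failed on some queue, after rolling back the
 * queues already configured; the caller then records that the core
 * virtio-net code must byte-swap vnet headers itself.
 */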
278 
279 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
280 {
281     VirtIODevice *vdev = VIRTIO_DEVICE(n);
282     int queues = n->multiqueue ? n->max_queues : 1;
283 
284     if (virtio_net_started(n, status)) {
285         /* Before using the device, we tell the network backend about the
286          * endianness to use when parsing vnet headers. If the backend
287          * can't do it, we fall back to fixing the headers in the core
288          * virtio-net code.
289          */
290         n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
291                                                             queues, true);
292     } else if (virtio_net_started(n, vdev->status)) {
293         /* After using the device, we need to reset the network backend to
294          * the default (guest native endianness), otherwise the guest may
295          * lose network connectivity if it is rebooted into a different
296          * endianness.
297          */
298         virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
299     }
300 }
301 
302 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
303 {
304     unsigned int dropped = virtqueue_drop_all(vq);
305     if (dropped) {
306         virtio_notify(vdev, vq);
307     }
308 }
309 
310 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
311 {
312     VirtIONet *n = VIRTIO_NET(vdev);
313     VirtIONetQueue *q;
314     int i;
315     uint8_t queue_status;
316 
317     virtio_net_vnet_endian_status(n, status);
318     virtio_net_vhost_status(n, status);
319 
320     for (i = 0; i < n->max_queues; i++) {
321         NetClientState *ncs = qemu_get_subqueue(n->nic, i);
322         bool queue_started;
323         q = &n->vqs[i];
324 
325         if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
326             queue_status = 0;
327         } else {
328             queue_status = status;
329         }
330         queue_started =
331             virtio_net_started(n, queue_status) && !n->vhost_started;
332 
333         if (queue_started) {
334             qemu_flush_queued_packets(ncs);
335         }
336 
337         if (!q->tx_waiting) {
338             continue;
339         }
340 
341         if (queue_started) {
342             if (q->tx_timer) {
343                 timer_mod(q->tx_timer,
344                                qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
345             } else {
346                 qemu_bh_schedule(q->tx_bh);
347             }
348         } else {
349             if (q->tx_timer) {
350                 timer_del(q->tx_timer);
351             } else {
352                 qemu_bh_cancel(q->tx_bh);
353             }
354             if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
355                 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
356                 vdev->vm_running) {
357                 /* If tx is waiting, we likely have some packets in the tx
358                  * queue and notification disabled. */
359                 q->tx_waiting = 0;
360                 virtio_queue_set_notification(q->tx_vq, 1);
361                 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
362             }
363         }
364     }
365 }
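
/*
 * Each TX queue flushes through exactly one of two mechanisms, which is
 * why virtio_net_set_status() handles both arms above: a timer (the
 * "tx=timer" property, batching transmits every tx_timeout ns) or a
 * bottom half (the default "tx=bh", flushing as soon as possible).
 * q->tx_timer being non-NULL selects the timer variant.
 */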
366 
367 static void virtio_net_set_link_status(NetClientState *nc)
368 {
369     VirtIONet *n = qemu_get_nic_opaque(nc);
370     VirtIODevice *vdev = VIRTIO_DEVICE(n);
371     uint16_t old_status = n->status;
372 
373     if (nc->link_down)
374         n->status &= ~VIRTIO_NET_S_LINK_UP;
375     else
376         n->status |= VIRTIO_NET_S_LINK_UP;
377 
378     if (n->status != old_status)
379         virtio_notify_config(vdev);
380 
381     virtio_net_set_status(vdev, vdev->status);
382 }
383 
384 static void rxfilter_notify(NetClientState *nc)
385 {
386     VirtIONet *n = qemu_get_nic_opaque(nc);
387 
388     if (nc->rxfilter_notify_enabled) {
389         gchar *path = object_get_canonical_path(OBJECT(n->qdev));
390         qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
391                                               n->netclient_name, path);
392         g_free(path);
393 
394         /* disable event notification to avoid event flooding */
395         nc->rxfilter_notify_enabled = 0;
396     }
397 }
398 
399 static intList *get_vlan_table(VirtIONet *n)
400 {
401     intList *list, *entry;
402     int i, j;
403 
404     list = NULL;
405     for (i = 0; i < MAX_VLAN >> 5; i++) {
406         for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
407             if (n->vlans[i] & (1U << j)) {
408                 entry = g_malloc0(sizeof(*entry));
409                 entry->value = (i << 5) + j;
410                 entry->next = list;
411                 list = entry;
412             }
413         }
414     }
415 
416     return list;
417 }
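
/*
 * n->vlans is a bitmap of all 4096 VLAN IDs packed into 32-bit words:
 * VLAN id v lives in vlans[v >> 5] at bit (v & 0x1f), so e.g. id 100
 * sits in vlans[3], bit 4.  The loop above simply unpacks every set bit
 * back into the id (i << 5) + j.
 */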
418 
419 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
420 {
421     VirtIONet *n = qemu_get_nic_opaque(nc);
422     VirtIODevice *vdev = VIRTIO_DEVICE(n);
423     RxFilterInfo *info;
424     strList *str_list, *entry;
425     int i;
426 
427     info = g_malloc0(sizeof(*info));
428     info->name = g_strdup(nc->name);
429     info->promiscuous = n->promisc;
430 
431     if (n->nouni) {
432         info->unicast = RX_STATE_NONE;
433     } else if (n->alluni) {
434         info->unicast = RX_STATE_ALL;
435     } else {
436         info->unicast = RX_STATE_NORMAL;
437     }
438 
439     if (n->nomulti) {
440         info->multicast = RX_STATE_NONE;
441     } else if (n->allmulti) {
442         info->multicast = RX_STATE_ALL;
443     } else {
444         info->multicast = RX_STATE_NORMAL;
445     }
446 
447     info->broadcast_allowed = n->nobcast;
448     info->multicast_overflow = n->mac_table.multi_overflow;
449     info->unicast_overflow = n->mac_table.uni_overflow;
450 
451     info->main_mac = qemu_mac_strdup_printf(n->mac);
452 
453     str_list = NULL;
454     for (i = 0; i < n->mac_table.first_multi; i++) {
455         entry = g_malloc0(sizeof(*entry));
456         entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
457         entry->next = str_list;
458         str_list = entry;
459     }
460     info->unicast_table = str_list;
461 
462     str_list = NULL;
463     for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
464         entry = g_malloc0(sizeof(*entry));
465         entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
466         entry->next = str_list;
467         str_list = entry;
468     }
469     info->multicast_table = str_list;
470     info->vlan_table = get_vlan_table(n);
471 
472     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
473         info->vlan = RX_STATE_ALL;
474     } else if (!info->vlan_table) {
475         info->vlan = RX_STATE_NONE;
476     } else {
477         info->vlan = RX_STATE_NORMAL;
478     }
479 
480     /* enable event notification after query */
481     nc->rxfilter_notify_enabled = 1;
482 
483     return info;
484 }
485 
486 static void virtio_net_reset(VirtIODevice *vdev)
487 {
488     VirtIONet *n = VIRTIO_NET(vdev);
489     int i;
490 
491     /* Reset back to compatibility mode */
492     n->promisc = 1;
493     n->allmulti = 0;
494     n->alluni = 0;
495     n->nomulti = 0;
496     n->nouni = 0;
497     n->nobcast = 0;
498     /* multiqueue is disabled by default */
499     n->curr_queues = 1;
500     timer_del(n->announce_timer.tm);
501     n->announce_timer.round = 0;
502     n->status &= ~VIRTIO_NET_S_ANNOUNCE;
503 
504     /* Flush any MAC and VLAN filter table state */
505     n->mac_table.in_use = 0;
506     n->mac_table.first_multi = 0;
507     n->mac_table.multi_overflow = 0;
508     n->mac_table.uni_overflow = 0;
509     memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
510     memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
511     qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
512     memset(n->vlans, 0, MAX_VLAN >> 3);
513 
514     /* Flush any async TX */
515     for (i = 0;  i < n->max_queues; i++) {
516         NetClientState *nc = qemu_get_subqueue(n->nic, i);
517 
518         if (nc->peer) {
519             qemu_flush_or_purge_queued_packets(nc->peer, true);
520             assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
521         }
522     }
523 }
524 
525 static void peer_test_vnet_hdr(VirtIONet *n)
526 {
527     NetClientState *nc = qemu_get_queue(n->nic);
528     if (!nc->peer) {
529         return;
530     }
531 
532     n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
533 }
534 
535 static int peer_has_vnet_hdr(VirtIONet *n)
536 {
537     return n->has_vnet_hdr;
538 }
539 
540 static int peer_has_ufo(VirtIONet *n)
541 {
542     if (!peer_has_vnet_hdr(n))
543         return 0;
544 
545     n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
546 
547     return n->has_ufo;
548 }
549 
550 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
551                                        int version_1)
552 {
553     int i;
554     NetClientState *nc;
555 
556     n->mergeable_rx_bufs = mergeable_rx_bufs;
557 
558     if (version_1) {
559         n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
560     } else {
561         n->guest_hdr_len = n->mergeable_rx_bufs ?
562             sizeof(struct virtio_net_hdr_mrg_rxbuf) :
563             sizeof(struct virtio_net_hdr);
564     }
565 
566     for (i = 0; i < n->max_queues; i++) {
567         nc = qemu_get_subqueue(n->nic, i);
568 
569         if (peer_has_vnet_hdr(n) &&
570             qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
571             qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
572             n->host_hdr_len = n->guest_hdr_len;
573         }
574     }
575 }
576 
577 static int virtio_net_max_tx_queue_size(VirtIONet *n)
578 {
579     NetClientState *peer = n->nic_conf.peers.ncs[0];
580 
581     /*
582      * Backends other than vhost-user don't support max queue size.
583      */
584     if (!peer) {
585         return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
586     }
587 
588     if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
589         return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
590     }
591 
592     return VIRTQUEUE_MAX_SIZE;
593 }
594 
595 static int peer_attach(VirtIONet *n, int index)
596 {
597     NetClientState *nc = qemu_get_subqueue(n->nic, index);
598 
599     if (!nc->peer) {
600         return 0;
601     }
602 
603     if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
604         vhost_set_vring_enable(nc->peer, 1);
605     }
606 
607     if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
608         return 0;
609     }
610 
611     if (n->max_queues == 1) {
612         return 0;
613     }
614 
615     return tap_enable(nc->peer);
616 }
617 
618 static int peer_detach(VirtIONet *n, int index)
619 {
620     NetClientState *nc = qemu_get_subqueue(n->nic, index);
621 
622     if (!nc->peer) {
623         return 0;
624     }
625 
626     if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
627         vhost_set_vring_enable(nc->peer, 0);
628     }
629 
630     if (nc->peer->info->type !=  NET_CLIENT_DRIVER_TAP) {
631         return 0;
632     }
633 
634     return tap_disable(nc->peer);
635 }
636 
637 static void virtio_net_set_queues(VirtIONet *n)
638 {
639     int i;
640     int r;
641 
642     if (n->nic->peer_deleted) {
643         return;
644     }
645 
646     for (i = 0; i < n->max_queues; i++) {
647         if (i < n->curr_queues) {
648             r = peer_attach(n, i);
649             assert(!r);
650         } else {
651             r = peer_detach(n, i);
652             assert(!r);
653         }
654     }
655 }
656 
657 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
658 
659 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
660                                         Error **errp)
661 {
662     VirtIONet *n = VIRTIO_NET(vdev);
663     NetClientState *nc = qemu_get_queue(n->nic);
664 
665     /* First sync all possible supported virtio-net features */
666     features |= n->host_features;
667 
668     virtio_add_feature(&features, VIRTIO_NET_F_MAC);
669 
670     if (!peer_has_vnet_hdr(n)) {
671         virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
672         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
673         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
674         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
675 
676         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
677         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
678         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
679         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
680     }
681 
682     if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
683         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
684         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
685     }
686 
687     if (!get_vhost_net(nc->peer)) {
688         return features;
689     }
690 
691     features = vhost_net_get_features(get_vhost_net(nc->peer), features);
692     vdev->backend_features = features;
693 
694     if (n->mtu_bypass_backend &&
695             (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
696         features |= (1ULL << VIRTIO_NET_F_MTU);
697     }
698 
699     return features;
700 }
701 
702 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
703 {
704     uint64_t features = 0;
705 
706     /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
707      * but also these: */
708     virtio_add_feature(&features, VIRTIO_NET_F_MAC);
709     virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
710     virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
711     virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
712     virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
713 
714     return features;
715 }
716 
717 static void virtio_net_apply_guest_offloads(VirtIONet *n)
718 {
719     qemu_set_offload(qemu_get_queue(n->nic)->peer,
720             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
721             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
722             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
723             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
724             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
725 }
726 
727 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
728 {
729     static const uint64_t guest_offloads_mask =
730         (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
731         (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
732         (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
733         (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
734         (1ULL << VIRTIO_NET_F_GUEST_UFO);
735 
736     return guest_offloads_mask & features;
737 }
738 
739 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
740 {
741     VirtIODevice *vdev = VIRTIO_DEVICE(n);
742     return virtio_net_guest_offloads_by_features(vdev->guest_features);
743 }
744 
745 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
746 {
747     VirtIONet *n = VIRTIO_NET(vdev);
748     int i;
749 
750     if (n->mtu_bypass_backend &&
751             !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
752         features &= ~(1ULL << VIRTIO_NET_F_MTU);
753     }
754 
755     virtio_net_set_multiqueue(n,
756                               virtio_has_feature(features, VIRTIO_NET_F_MQ));
757 
758     virtio_net_set_mrg_rx_bufs(n,
759                                virtio_has_feature(features,
760                                                   VIRTIO_NET_F_MRG_RXBUF),
761                                virtio_has_feature(features,
762                                                   VIRTIO_F_VERSION_1));
763 
764     n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
765         virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
766     n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
767         virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
768 
769     if (n->has_vnet_hdr) {
770         n->curr_guest_offloads =
771             virtio_net_guest_offloads_by_features(features);
772         virtio_net_apply_guest_offloads(n);
773     }
774 
775     for (i = 0;  i < n->max_queues; i++) {
776         NetClientState *nc = qemu_get_subqueue(n->nic, i);
777 
778         if (!get_vhost_net(nc->peer)) {
779             continue;
780         }
781         vhost_net_ack_features(get_vhost_net(nc->peer), features);
782     }
783 
784     if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
785         memset(n->vlans, 0, MAX_VLAN >> 3);
786     } else {
787         memset(n->vlans, 0xff, MAX_VLAN >> 3);
788     }
789 }
790 
791 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
792                                      struct iovec *iov, unsigned int iov_cnt)
793 {
794     uint8_t on;
795     size_t s;
796     NetClientState *nc = qemu_get_queue(n->nic);
797 
798     s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
799     if (s != sizeof(on)) {
800         return VIRTIO_NET_ERR;
801     }
802 
803     if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
804         n->promisc = on;
805     } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
806         n->allmulti = on;
807     } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
808         n->alluni = on;
809     } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
810         n->nomulti = on;
811     } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
812         n->nouni = on;
813     } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
814         n->nobcast = on;
815     } else {
816         return VIRTIO_NET_ERR;
817     }
818 
819     rxfilter_notify(nc);
820 
821     return VIRTIO_NET_OK;
822 }
823 
824 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
825                                      struct iovec *iov, unsigned int iov_cnt)
826 {
827     VirtIODevice *vdev = VIRTIO_DEVICE(n);
828     uint64_t offloads;
829     size_t s;
830 
831     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
832         return VIRTIO_NET_ERR;
833     }
834 
835     s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
836     if (s != sizeof(offloads)) {
837         return VIRTIO_NET_ERR;
838     }
839 
840     if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
841         uint64_t supported_offloads;
842 
843         offloads = virtio_ldq_p(vdev, &offloads);
844 
845         if (!n->has_vnet_hdr) {
846             return VIRTIO_NET_ERR;
847         }
848 
849         n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
850             virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
851         n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
852             virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
853         virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
854 
855         supported_offloads = virtio_net_supported_guest_offloads(n);
856         if (offloads & ~supported_offloads) {
857             return VIRTIO_NET_ERR;
858         }
859 
860         n->curr_guest_offloads = offloads;
861         virtio_net_apply_guest_offloads(n);
862 
863         return VIRTIO_NET_OK;
864     } else {
865         return VIRTIO_NET_ERR;
866     }
867 }
868 
869 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
870                                  struct iovec *iov, unsigned int iov_cnt)
871 {
872     VirtIODevice *vdev = VIRTIO_DEVICE(n);
873     struct virtio_net_ctrl_mac mac_data;
874     size_t s;
875     NetClientState *nc = qemu_get_queue(n->nic);
876 
877     if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
878         if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
879             return VIRTIO_NET_ERR;
880         }
881         s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
882         assert(s == sizeof(n->mac));
883         qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
884         rxfilter_notify(nc);
885 
886         return VIRTIO_NET_OK;
887     }
888 
889     if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
890         return VIRTIO_NET_ERR;
891     }
892 
893     int in_use = 0;
894     int first_multi = 0;
895     uint8_t uni_overflow = 0;
896     uint8_t multi_overflow = 0;
897     uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
898 
899     s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
900                    sizeof(mac_data.entries));
901     mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
902     if (s != sizeof(mac_data.entries)) {
903         goto error;
904     }
905     iov_discard_front(&iov, &iov_cnt, s);
906 
907     if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
908         goto error;
909     }
910 
911     if (mac_data.entries <= MAC_TABLE_ENTRIES) {
912         s = iov_to_buf(iov, iov_cnt, 0, macs,
913                        mac_data.entries * ETH_ALEN);
914         if (s != mac_data.entries * ETH_ALEN) {
915             goto error;
916         }
917         in_use += mac_data.entries;
918     } else {
919         uni_overflow = 1;
920     }
921 
922     iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
923 
924     first_multi = in_use;
925 
926     s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
927                    sizeof(mac_data.entries));
928     mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
929     if (s != sizeof(mac_data.entries)) {
930         goto error;
931     }
932 
933     iov_discard_front(&iov, &iov_cnt, s);
934 
935     if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
936         goto error;
937     }
938 
939     if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
940         s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
941                        mac_data.entries * ETH_ALEN);
942         if (s != mac_data.entries * ETH_ALEN) {
943             goto error;
944         }
945         in_use += mac_data.entries;
946     } else {
947         multi_overflow = 1;
948     }
949 
950     n->mac_table.in_use = in_use;
951     n->mac_table.first_multi = first_multi;
952     n->mac_table.uni_overflow = uni_overflow;
953     n->mac_table.multi_overflow = multi_overflow;
954     memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
955     g_free(macs);
956     rxfilter_notify(nc);
957 
958     return VIRTIO_NET_OK;
959 
960 error:
961     g_free(macs);
962     return VIRTIO_NET_ERR;
963 }
964 
965 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
966                                         struct iovec *iov, unsigned int iov_cnt)
967 {
968     VirtIODevice *vdev = VIRTIO_DEVICE(n);
969     uint16_t vid;
970     size_t s;
971     NetClientState *nc = qemu_get_queue(n->nic);
972 
973     s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
974     vid = virtio_lduw_p(vdev, &vid);
975     if (s != sizeof(vid)) {
976         return VIRTIO_NET_ERR;
977     }
978 
979     if (vid >= MAX_VLAN)
980         return VIRTIO_NET_ERR;
981 
982     if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
983         n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
984     else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
985         n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
986     else
987         return VIRTIO_NET_ERR;
988 
989     rxfilter_notify(nc);
990 
991     return VIRTIO_NET_OK;
992 }
993 
994 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
995                                       struct iovec *iov, unsigned int iov_cnt)
996 {
997     trace_virtio_net_handle_announce(n->announce_timer.round);
998     if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
999         n->status & VIRTIO_NET_S_ANNOUNCE) {
1000         n->status &= ~VIRTIO_NET_S_ANNOUNCE;
1001         if (n->announce_timer.round) {
1002             qemu_announce_timer_step(&n->announce_timer);
1003         }
1004         return VIRTIO_NET_OK;
1005     } else {
1006         return VIRTIO_NET_ERR;
1007     }
1008 }
1009 
1010 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
1011                                 struct iovec *iov, unsigned int iov_cnt)
1012 {
1013     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1014     struct virtio_net_ctrl_mq mq;
1015     size_t s;
1016     uint16_t queues;
1017 
1018     s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
1019     if (s != sizeof(mq)) {
1020         return VIRTIO_NET_ERR;
1021     }
1022 
1023     if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
1024         return VIRTIO_NET_ERR;
1025     }
1026 
1027     queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
1028 
1029     if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1030         queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1031         queues > n->max_queues ||
1032         !n->multiqueue) {
1033         return VIRTIO_NET_ERR;
1034     }
1035 
1036     n->curr_queues = queues;
1037     /* stop the backend before changing the number of queues to avoid handling a
1038      * disabled queue */
1039     virtio_net_set_status(vdev, vdev->status);
1040     virtio_net_set_queues(n);
1041 
1042     return VIRTIO_NET_OK;
1043 }
1044 
1045 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1046 {
1047     VirtIONet *n = VIRTIO_NET(vdev);
1048     struct virtio_net_ctrl_hdr ctrl;
1049     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1050     VirtQueueElement *elem;
1051     size_t s;
1052     struct iovec *iov, *iov2;
1053     unsigned int iov_cnt;
1054 
1055     for (;;) {
1056         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1057         if (!elem) {
1058             break;
1059         }
1060         if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
1061             iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
1062             virtio_error(vdev, "virtio-net ctrl missing headers");
1063             virtqueue_detach_element(vq, elem, 0);
1064             g_free(elem);
1065             break;
1066         }
1067 
1068         iov_cnt = elem->out_num;
1069         iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
1070         s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
1071         iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
1072         if (s != sizeof(ctrl)) {
1073             status = VIRTIO_NET_ERR;
1074         } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
1075             status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
1076         } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
1077             status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
1078         } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
1079             status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
1080         } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
1081             status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
1082         } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
1083             status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
1084         } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1085             status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
1086         }
1087 
1088         s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
1089         assert(s == sizeof(status));
1090 
1091         virtqueue_push(vq, elem, sizeof(status));
1092         virtio_notify(vdev, vq);
1093         g_free(iov2);
1094         g_free(elem);
1095     }
1096 }
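
/*
 * For reference, each control-queue request is a small out/in exchange:
 * a struct virtio_net_ctrl_hdr { u8 class; u8 cmd; } plus command-specific
 * payload in the driver's out buffers, and a single virtio_net_ctrl_ack
 * status byte in the in buffers for the device to fill.  An illustrative
 * layout for VIRTIO_NET_CTRL_RX_PROMISC (guest-side view, not code from
 * this file):
 *
 *   out[0]: { .class = VIRTIO_NET_CTRL_RX, .cmd = VIRTIO_NET_CTRL_RX_PROMISC }
 *   out[1]: uint8_t on = 1;
 *   in[0]:  virtio_net_ctrl_ack status;   <- device writes OK/ERR
 */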
1097 
1098 /* RX */
1099 
1100 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1101 {
1102     VirtIONet *n = VIRTIO_NET(vdev);
1103     int queue_index = vq2q(virtio_get_queue_index(vq));
1104 
1105     qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1106 }
1107 
1108 static int virtio_net_can_receive(NetClientState *nc)
1109 {
1110     VirtIONet *n = qemu_get_nic_opaque(nc);
1111     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1112     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1113 
1114     if (!vdev->vm_running) {
1115         return 0;
1116     }
1117 
1118     if (nc->queue_index >= n->curr_queues) {
1119         return 0;
1120     }
1121 
1122     if (!virtio_queue_ready(q->rx_vq) ||
1123         !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1124         return 0;
1125     }
1126 
1127     return 1;
1128 }
1129 
1130 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1131 {
1132     VirtIONet *n = q->n;
1133     if (virtio_queue_empty(q->rx_vq) ||
1134         (n->mergeable_rx_bufs &&
1135          !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1136         virtio_queue_set_notification(q->rx_vq, 1);
1137 
1138         /* To avoid a race condition where the guest has made some buffers
1139          * available after the above check but before notification was
1140          * enabled, check for available buffers again.
1141          */
1142         if (virtio_queue_empty(q->rx_vq) ||
1143             (n->mergeable_rx_bufs &&
1144              !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1145             return 0;
1146         }
1147     }
1148 
1149     virtio_queue_set_notification(q->rx_vq, 0);
1150     return 1;
1151 }
1152 
1153 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1154 {
1155     virtio_tswap16s(vdev, &hdr->hdr_len);
1156     virtio_tswap16s(vdev, &hdr->gso_size);
1157     virtio_tswap16s(vdev, &hdr->csum_start);
1158     virtio_tswap16s(vdev, &hdr->csum_offset);
1159 }
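
/*
 * hdr_len, gso_size, csum_start and csum_offset are the only multi-byte
 * fields of struct virtio_net_hdr, so swapping these four is all that is
 * needed when the backend could not be told the guest's vnet-header
 * endianness (i.e. when needs_vnet_hdr_swap is set).
 */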
1160 
1161 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1162  * it never finds out that the packets don't have valid checksums.  This
1163  * causes dhclient to get upset.  Fedora's carried a patch for ages to
1164  * fix this with Xen but it hasn't appeared in an upstream release of
1165  * dhclient yet.
1166  *
1167  * To avoid breaking existing guests, we catch udp packets and add
1168  * checksums.  This is terrible but it's better than hacking the guest
1169  * kernels.
1170  *
1171  * N.B. if we introduce a zero-copy API, this operation is no longer free so
1172  * we should provide a mechanism to disable it to avoid polluting the host
1173  * cache.
1174  */
1175 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1176                                         uint8_t *buf, size_t size)
1177 {
1178     if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1179         (size > 27 && size < 1500) && /* normal sized MTU */
1180         (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1181         (buf[23] == 17) && /* ip.protocol == UDP */
1182         (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1183         net_checksum_calculate(buf, size);
1184         hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1185     }
1186 }
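
/*
 * The magic offsets above assume an untagged Ethernet frame: buf[12..13]
 * is the ethertype, buf[23] is the IPv4 protocol field (14-byte Ethernet
 * header + offset 9 into the IP header), and buf[34..35] is the UDP
 * source port (14 + 20-byte optionless IP header + offset 0), which is
 * 67 for a BOOTP/DHCP server.
 */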
1187 
1188 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1189                            const void *buf, size_t size)
1190 {
1191     if (n->has_vnet_hdr) {
1192         /* FIXME this cast is evil */
1193         void *wbuf = (void *)buf;
1194         work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1195                                     size - n->host_hdr_len);
1196 
1197         if (n->needs_vnet_hdr_swap) {
1198             virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1199         }
1200         iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1201     } else {
1202         struct virtio_net_hdr hdr = {
1203             .flags = 0,
1204             .gso_type = VIRTIO_NET_HDR_GSO_NONE
1205         };
1206         iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1207     }
1208 }
1209 
1210 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1211 {
1212     static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1213     static const uint8_t vlan[] = {0x81, 0x00};
1214     uint8_t *ptr = (uint8_t *)buf;
1215     int i;
1216 
1217     if (n->promisc)
1218         return 1;
1219 
1220     ptr += n->host_hdr_len;
1221 
1222     if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1223         int vid = lduw_be_p(ptr + 14) & 0xfff;
1224         if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1225             return 0;
1226     }
1227 
1228     if (ptr[0] & 1) { // multicast
1229         if (!memcmp(ptr, bcast, sizeof(bcast))) {
1230             return !n->nobcast;
1231         } else if (n->nomulti) {
1232             return 0;
1233         } else if (n->allmulti || n->mac_table.multi_overflow) {
1234             return 1;
1235         }
1236 
1237         for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1238             if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1239                 return 1;
1240             }
1241         }
1242     } else { // unicast
1243         if (n->nouni) {
1244             return 0;
1245         } else if (n->alluni || n->mac_table.uni_overflow) {
1246             return 1;
1247         } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1248             return 1;
1249         }
1250 
1251         for (i = 0; i < n->mac_table.first_multi; i++) {
1252             if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1253                 return 1;
1254             }
1255         }
1256     }
1257 
1258     return 0;
1259 }
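
/*
 * receive_filter() classifies on the destination MAC: ptr[0] & 1 tests
 * the I/G (individual/group) bit, which is set for both multicast and
 * broadcast frames; broadcast is then distinguished by comparing against
 * ff:ff:ff:ff:ff:ff before consulting the multicast list.
 */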
1260 
1261 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1262                                       size_t size)
1263 {
1264     VirtIONet *n = qemu_get_nic_opaque(nc);
1265     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1266     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1267     struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1268     struct virtio_net_hdr_mrg_rxbuf mhdr;
1269     unsigned mhdr_cnt = 0;
1270     size_t offset, i, guest_offset;
1271 
1272     if (!virtio_net_can_receive(nc)) {
1273         return -1;
1274     }
1275 
1276     /* hdr_len refers to the header we supply to the guest */
1277     if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1278         return 0;
1279     }
1280 
1281     if (!receive_filter(n, buf, size))
1282         return size;
1283 
1284     offset = i = 0;
1285 
1286     while (offset < size) {
1287         VirtQueueElement *elem;
1288         int len, total;
1289         const struct iovec *sg;
1290 
1291         total = 0;
1292 
1293         elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1294         if (!elem) {
1295             if (i) {
1296                 virtio_error(vdev, "virtio-net unexpected empty queue: "
1297                              "i %zd mergeable %d offset %zd, size %zd, "
1298                              "guest hdr len %zd, host hdr len %zd "
1299                              "guest features 0x%" PRIx64,
1300                              i, n->mergeable_rx_bufs, offset, size,
1301                              n->guest_hdr_len, n->host_hdr_len,
1302                              vdev->guest_features);
1303             }
1304             return -1;
1305         }
1306 
1307         if (elem->in_num < 1) {
1308             virtio_error(vdev,
1309                          "virtio-net receive queue contains no in buffers");
1310             virtqueue_detach_element(q->rx_vq, elem, 0);
1311             g_free(elem);
1312             return -1;
1313         }
1314 
1315         sg = elem->in_sg;
1316         if (i == 0) {
1317             assert(offset == 0);
1318             if (n->mergeable_rx_bufs) {
1319                 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1320                                     sg, elem->in_num,
1321                                     offsetof(typeof(mhdr), num_buffers),
1322                                     sizeof(mhdr.num_buffers));
1323             }
1324 
1325             receive_header(n, sg, elem->in_num, buf, size);
1326             offset = n->host_hdr_len;
1327             total += n->guest_hdr_len;
1328             guest_offset = n->guest_hdr_len;
1329         } else {
1330             guest_offset = 0;
1331         }
1332 
1333         /* copy in packet.  ugh */
1334         len = iov_from_buf(sg, elem->in_num, guest_offset,
1335                            buf + offset, size - offset);
1336         total += len;
1337         offset += len;
1338         /* If buffers can't be merged, at this point we
1339          * must have consumed the complete packet.
1340          * Otherwise, drop it. */
1341         if (!n->mergeable_rx_bufs && offset < size) {
1342             virtqueue_unpop(q->rx_vq, elem, total);
1343             g_free(elem);
1344             return size;
1345         }
1346 
1347         /* signal other side */
1348         virtqueue_fill(q->rx_vq, elem, total, i++);
1349         g_free(elem);
1350     }
1351 
1352     if (mhdr_cnt) {
1353         virtio_stw_p(vdev, &mhdr.num_buffers, i);
1354         iov_from_buf(mhdr_sg, mhdr_cnt,
1355                      0,
1356                      &mhdr.num_buffers, sizeof mhdr.num_buffers);
1357     }
1358 
1359     virtqueue_flush(q->rx_vq, i);
1360     virtio_notify(vdev, q->rx_vq);
1361 
1362     return size;
1363 }
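
/*
 * A subtlety of the mergeable-RX path above: the device must report, in
 * the num_buffers field of the *first* buffer's header, how many
 * descriptor chains the packet consumed.  mhdr_sg snapshots the iovec
 * covering just that field before the copy loop, so it can be patched
 * with the final count i once all chains have been filled.
 */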
1364 
1365 static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
1366                                   size_t size)
1367 {
1368     ssize_t r;
1369 
1370     rcu_read_lock();
1371     r = virtio_net_receive_rcu(nc, buf, size);
1372     rcu_read_unlock();
1373     return r;
1374 }
1375 
1376 static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
1377                                          const uint8_t *buf,
1378                                          VirtioNetRscUnit *unit)
1379 {
1380     uint16_t ip_hdrlen;
1381     struct ip_header *ip;
1382 
1383     ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
1384                               + sizeof(struct eth_header));
1385     unit->ip = (void *)ip;
1386     ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
1387     unit->ip_plen = &ip->ip_len;
1388     unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
1389     unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
1390     unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
1391 }
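
/*
 * The shift above packs two steps into one: the TCP data-offset field
 * occupies the top four bits of th_offset_flags and counts 32-bit words,
 * so ((flags & 0xF000) >> 12) * 4 == (flags & 0xF000) >> 10 gives the
 * header length in bytes.  The IPv6 variant below uses the same trick.
 */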
1392 
1393 static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
1394                                          const uint8_t *buf,
1395                                          VirtioNetRscUnit *unit)
1396 {
1397     struct ip6_header *ip6;
1398 
1399     ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
1400                                  + sizeof(struct eth_header));
1401     unit->ip = ip6;
1402     unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
1403     unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)\
1404                                         + sizeof(struct ip6_header));
1405     unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
1406 
1407     /* There is a difference between the payload length in IPv4 and IPv6:
1408        the IP header is excluded from the payload in IPv6 */
1409     unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
1410 }
1411 
1412 static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
1413                                        VirtioNetRscSeg *seg)
1414 {
1415     int ret;
1416     struct virtio_net_hdr *h;
1417 
1418     h = (struct virtio_net_hdr *)seg->buf;
1419     h->flags = 0;
1420     h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
1421 
1422     if (seg->is_coalesced) {
1423         *virtio_net_rsc_ext_num_packets(h) = seg->packets;
1424         *virtio_net_rsc_ext_num_dupacks(h) = seg->dup_ack;
1425         h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
1426         if (chain->proto == ETH_P_IP) {
1427             h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1428         } else {
1429             h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1430         }
1431     }
1432 
1433     ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
1434     QTAILQ_REMOVE(&chain->buffers, seg, next);
1435     g_free(seg->buf);
1436     g_free(seg);
1437 
1438     return ret;
1439 }
1440 
1441 static void virtio_net_rsc_purge(void *opq)
1442 {
1443     VirtioNetRscSeg *seg, *rn;
1444     VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
1445 
1446     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
1447         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1448             chain->stat.purge_failed++;
1449             continue;
1450         }
1451     }
1452 
1453     chain->stat.timer++;
1454     if (!QTAILQ_EMPTY(&chain->buffers)) {
1455         timer_mod(chain->drain_timer,
1456               qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
1457     }
1458 }
1459 
1460 static void virtio_net_rsc_cleanup(VirtIONet *n)
1461 {
1462     VirtioNetRscChain *chain, *rn_chain;
1463     VirtioNetRscSeg *seg, *rn_seg;
1464 
1465     QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
1466         QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
1467             QTAILQ_REMOVE(&chain->buffers, seg, next);
1468             g_free(seg->buf);
1469             g_free(seg);
1470         }
1471 
1472         timer_del(chain->drain_timer);
1473         timer_free(chain->drain_timer);
1474         QTAILQ_REMOVE(&n->rsc_chains, chain, next);
1475         g_free(chain);
1476     }
1477 }
1478 
1479 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
1480                                      NetClientState *nc,
1481                                      const uint8_t *buf, size_t size)
1482 {
1483     uint16_t hdr_len;
1484     VirtioNetRscSeg *seg;
1485 
1486     hdr_len = chain->n->guest_hdr_len;
1487     seg = g_malloc(sizeof(VirtioNetRscSeg));
1488     seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
1489         + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
1490     memcpy(seg->buf, buf, size);
1491     seg->size = size;
1492     seg->packets = 1;
1493     seg->dup_ack = 0;
1494     seg->is_coalesced = 0;
1495     seg->nc = nc;
1496 
1497     QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
1498     chain->stat.cache++;
1499 
1500     switch (chain->proto) {
1501     case ETH_P_IP:
1502         virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
1503         break;
1504     case ETH_P_IPV6:
1505         virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
1506         break;
1507     default:
1508         g_assert_not_reached();
1509     }
1510 }
1511 
1512 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
1513                                          VirtioNetRscSeg *seg,
1514                                          const uint8_t *buf,
1515                                          struct tcp_header *n_tcp,
1516                                          struct tcp_header *o_tcp)
1517 {
1518     uint32_t nack, oack;
1519     uint16_t nwin, owin;
1520 
1521     nack = htonl(n_tcp->th_ack);
1522     nwin = htons(n_tcp->th_win);
1523     oack = htonl(o_tcp->th_ack);
1524     owin = htons(o_tcp->th_win);
1525 
1526     if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
1527         chain->stat.ack_out_of_win++;
1528         return RSC_FINAL;
1529     } else if (nack == oack) {
1530         /* duplicated ack or window probe */
1531         if (nwin == owin) {
1532             /* duplicated ack; bump the dup-ack count (the WHQL test allows up to 1) */
1533             chain->stat.dup_ack++;
1534             return RSC_FINAL;
1535         } else {
1536             /* Coalesce window update */
1537             o_tcp->th_win = n_tcp->th_win;
1538             chain->stat.win_update++;
1539             return RSC_COALESCE;
1540         }
1541     } else {
1542         /* pure ack, go to 'C', finalize */
1543         chain->stat.pure_ack++;
1544         return RSC_FINAL;
1545     }
1546 }
1547 
1548 static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
1549                                             VirtioNetRscSeg *seg,
1550                                             const uint8_t *buf,
1551                                             VirtioNetRscUnit *n_unit)
1552 {
1553     void *data;
1554     uint16_t o_ip_len;
1555     uint32_t nseq, oseq;
1556     VirtioNetRscUnit *o_unit;
1557 
1558     o_unit = &seg->unit;
1559     o_ip_len = htons(*o_unit->ip_plen);
1560     nseq = htonl(n_unit->tcp->th_seq);
1561     oseq = htonl(o_unit->tcp->th_seq);
1562 
1563     /* out of order or retransmitted. */
1564     if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
1565         chain->stat.data_out_of_win++;
1566         return RSC_FINAL;
1567     }
1568 
1569     data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
1570     if (nseq == oseq) {
1571         if ((o_unit->payload == 0) && n_unit->payload) {
1572             /* From no payload to payload: the normal case, not a dup ack etc. */
1573             chain->stat.data_after_pure_ack++;
1574             goto coalesce;
1575         } else {
1576             return virtio_net_rsc_handle_ack(chain, seg, buf,
1577                                              n_unit->tcp, o_unit->tcp);
1578         }
1579     } else if ((nseq - oseq) != o_unit->payload) {
1580         /* Not a consistent packet, out of order */
1581         chain->stat.data_out_of_order++;
1582         return RSC_FINAL;
1583     } else {
1584 coalesce:
1585         if ((o_ip_len + n_unit->payload) > chain->max_payload) {
1586             chain->stat.over_size++;
1587             return RSC_FINAL;
1588         }
1589 
1590         /* The data is contiguous; the payload length field differs between
1591            v4 and v6, so use its value to update and record the new data len */
1592         o_unit->payload += n_unit->payload; /* update new data len */
1593 
1594         /* update field in ip header */
1595         *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);
1596 
1597         /* Propagate the 'PUSH' flag: the WHQL test guide says 'PUSH' can be
1598            coalesced for Windows guests, while this may change the behavior
1599            for Linux guests (only if they use the RSC feature). */
1600         o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
1601 
1602         o_unit->tcp->th_ack = n_unit->tcp->th_ack;
1603         o_unit->tcp->th_win = n_unit->tcp->th_win;
1604 
1605         memmove(seg->buf + seg->size, data, n_unit->payload);
1606         seg->size += n_unit->payload;
1607         seg->packets++;
1608         chain->stat.coalesced++;
1609         return RSC_COALESCE;
1610     }
1611 }
1612 
1613 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
1614                                         VirtioNetRscSeg *seg,
1615                                         const uint8_t *buf, size_t size,
1616                                         VirtioNetRscUnit *unit)
1617 {
1618     struct ip_header *ip1, *ip2;
1619 
1620     ip1 = (struct ip_header *)(unit->ip);
1621     ip2 = (struct ip_header *)(seg->unit.ip);
1622     if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
1623         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
1624         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
1625         chain->stat.no_match++;
1626         return RSC_NO_MATCH;
1627     }
1628 
1629     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
1630 }
1631 
1632 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
1633                                         VirtioNetRscSeg *seg,
1634                                         const uint8_t *buf, size_t size,
1635                                         VirtioNetRscUnit *unit)
1636 {
1637     struct ip6_header *ip1, *ip2;
1638 
1639     ip1 = (struct ip6_header *)(unit->ip);
1640     ip2 = (struct ip6_header *)(seg->unit.ip);
1641     if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
1642         || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
1643         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
1644         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
1645         chain->stat.no_match++;
1646         return RSC_NO_MATCH;
1647     }
1648 
1649     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
1650 }
1651 
1652 /* Packets with the 'SYN' flag should bypass coalescing; any other control
1653  * flag forces a drain first, to prevent out-of-order delivery */
1654 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
1655                                          struct tcp_header *tcp)
1656 {
1657     uint16_t tcp_hdr;
1658     uint16_t tcp_flag;
1659 
1660     tcp_flag = ntohs(tcp->th_offset_flags);
1661     tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
1662     tcp_flag &= VIRTIO_NET_TCP_FLAG;
1664     if (tcp_flag & TH_SYN) {
1665         chain->stat.tcp_syn++;
1666         return RSC_BYPASS;
1667     }
1668 
1669     if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
1670         chain->stat.tcp_ctrl_drain++;
1671         return RSC_FINAL;
1672     }
1673 
1674     if (tcp_hdr > sizeof(struct tcp_header)) {
1675         chain->stat.tcp_all_opt++;
1676         return RSC_FINAL;
1677     }
1678 
1679     return RSC_CANDIDATE;
1680 }
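
/*
 * Worked example for the decode above (annotation only): for a host-order
 * th_offset_flags of 0x5018 (data offset 5, ACK|PSH set), masking with
 * VIRTIO_NET_TCP_HDR_LENGTH and shifting right by 10 gives
 * 0x5000 >> 10 = 20 bytes, i.e. the 4-bit word count is converted to
 * bytes in a single shift instead of ">> 12" followed by "* 4".  Masking
 * with VIRTIO_NET_TCP_FLAG leaves 0x18 = ACK|PSH, so such a segment
 * remains an RSC candidate.
 */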
1681 
1682 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
1683                                          NetClientState *nc,
1684                                          const uint8_t *buf, size_t size,
1685                                          VirtioNetRscUnit *unit)
1686 {
1687     int ret;
1688     VirtioNetRscSeg *seg, *nseg;
1689 
1690     if (QTAILQ_EMPTY(&chain->buffers)) {
1691         chain->stat.empty_cache++;
1692         virtio_net_rsc_cache_buf(chain, nc, buf, size);
1693         timer_mod(chain->drain_timer,
1694               qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
1695         return size;
1696     }
1697 
1698     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
1699         if (chain->proto == ETH_P_IP) {
1700             ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
1701         } else {
1702             ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
1703         }
1704 
1705         if (ret == RSC_FINAL) {
1706             if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1707                 /* Send failed */
1708                 chain->stat.final_failed++;
1709                 return 0;
1710             }
1711 
1712             /* Send current packet */
1713             return virtio_net_do_receive(nc, buf, size);
1714         } else if (ret == RSC_NO_MATCH) {
1715             continue;
1716         } else {
1717             /* Coalesced; set the flag so the IPv4 checksum is recalculated */
1718             seg->is_coalesced = 1;
1719             return size;
1720         }
1721     }
1722 
1723     chain->stat.no_match_cache++;
1724     virtio_net_rsc_cache_buf(chain, nc, buf, size);
1725     return size;
1726 }
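
/*
 * Summary of the flow above (annotation): an empty chain caches the
 * packet and arms the purge timer; otherwise each cached segment is
 * tried in turn -- RSC_FINAL flushes that segment and forwards the new
 * packet unmodified, RSC_NO_MATCH moves on to the next segment, and a
 * successful coalesce consumes the packet.  A packet that matches no
 * cached segment is cached alongside the existing ones.
 */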
1727 
1728 /* Drain a connection's buffered data, to avoid out-of-order segments */
1729 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
1730                                         NetClientState *nc,
1731                                         const uint8_t *buf, size_t size,
1732                                         uint16_t ip_start, uint16_t ip_size,
1733                                         uint16_t tcp_port)
1734 {
1735     VirtioNetRscSeg *seg, *nseg;
1736     uint32_t ppair1, ppair2;
1737 
1738     ppair1 = *(uint32_t *)(buf + tcp_port);
1739     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
1740         ppair2 = *(uint32_t *)(seg->buf + tcp_port);
1741         if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
1742             || (ppair1 != ppair2)) {
1743             continue;
1744         }
1745         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1746             chain->stat.drain_failed++;
1747         }
1748 
1749         break;
1750     }
1751 
1752     return virtio_net_do_receive(nc, buf, size);
1753 }
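
/*
 * Editor's note on the ppair compare above: the TCP source and
 * destination ports are adjacent 16-bit fields at the start of the TCP
 * header, so one 32-bit compare covers both, and no byte swap is needed
 * because both sides stay in network order.  A sketch of the same idea
 * that also avoids the unaligned pointer cast (fine on x86, undefined
 * on strict-alignment targets); the helper name is hypothetical:
 */
static inline bool virtio_net_rsc_ports_match_example(const uint8_t *a,
                                                      const uint8_t *b,
                                                      uint16_t tcp_off)
{
    uint32_t pa, pb;

    memcpy(&pa, a + tcp_off, sizeof(pa)); /* sport + dport, network order */
    memcpy(&pb, b + tcp_off, sizeof(pb));
    return pa == pb;
}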
1754 
1755 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
1756                                             struct ip_header *ip,
1757                                             const uint8_t *buf, size_t size)
1758 {
1759     uint16_t ip_len;
1760 
1761     /* Not an ipv4 packet */
1762     if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
1763         chain->stat.ip_option++;
1764         return RSC_BYPASS;
1765     }
1766 
1767     /* Don't handle packets with ip option */
1768     if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
1769         chain->stat.ip_option++;
1770         return RSC_BYPASS;
1771     }
1772 
1773     if (ip->ip_p != IPPROTO_TCP) {
1774         chain->stat.bypass_not_tcp++;
1775         return RSC_BYPASS;
1776     }
1777 
1778     /* Don't handle packets with ip fragment */
1779     if (!(ntohs(ip->ip_off) & IP_DF)) {
1780         chain->stat.ip_frag++;
1781         return RSC_BYPASS;
1782     }
1783 
1784     /* Don't handle packets with ecn flag */
1785     if (IPTOS_ECN(ip->ip_tos)) {
1786         chain->stat.ip_ecn++;
1787         return RSC_BYPASS;
1788     }
1789 
1790     ip_len = ntohs(ip->ip_len);
1791     if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
1792         || ip_len > (size - chain->n->guest_hdr_len -
1793                      sizeof(struct eth_header))) {
1794         chain->stat.ip_hacked++;
1795         return RSC_BYPASS;
1796     }
1797 
1798     return RSC_CANDIDATE;
1799 }
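
/*
 * Illustration for the version/IHL tests above: the first byte of an
 * option-less IPv4 header is 0x45 -- version 4 in the high nibble, a
 * header length of 5 32-bit words (20 bytes) in the low nibble.  A
 * hypothetical predicate with the same meaning:
 */
static inline bool virtio_net_rsc_ip4_plain_hdr_example(uint8_t ip_ver_len)
{
    return ((ip_ver_len & 0xF0) >> 4) == IP_HEADER_VERSION_4
           && (ip_ver_len & 0xF) == VIRTIO_NET_IP4_HEADER_LENGTH;
}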
1800 
1801 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
1802                                       NetClientState *nc,
1803                                       const uint8_t *buf, size_t size)
1804 {
1805     int32_t ret;
1806     uint16_t hdr_len;
1807     VirtioNetRscUnit unit;
1808 
1809     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
1810 
1811     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
1812         + sizeof(struct tcp_header))) {
1813         chain->stat.bypass_not_tcp++;
1814         return virtio_net_do_receive(nc, buf, size);
1815     }
1816 
1817     virtio_net_rsc_extract_unit4(chain, buf, &unit);
1818     if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
1819         != RSC_CANDIDATE) {
1820         return virtio_net_do_receive(nc, buf, size);
1821     }
1822 
1823     ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
1824     if (ret == RSC_BYPASS) {
1825         return virtio_net_do_receive(nc, buf, size);
1826     } else if (ret == RSC_FINAL) {
1827         return virtio_net_rsc_drain_flow(chain, nc, buf, size,
1828                 ((hdr_len + sizeof(struct eth_header)) + 12),
1829                 VIRTIO_NET_IP4_ADDR_SIZE,
1830                 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
1831     }
1832 
1833     return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
1834 }
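
/*
 * Note on the magic offsets above: 12 is the offset of the source
 * address within the IPv4 header, so the drain key starts at the
 * saddr/daddr pair (VIRTIO_NET_IP4_ADDR_SIZE bytes) and the port pair
 * sits right after the IP header.  A self-documenting spelling would be
 * offsetof(struct ip_header, ip_src); the IPv6 path below uses 8, the
 * offset of its source address, in the same way.
 */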
1835 
1836 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
1837                                             struct ip6_header *ip6,
1838                                             const uint8_t *buf, size_t size)
1839 {
1840     uint16_t ip_len;
1841 
1842     if (((ntohl(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow) >> 28) & 0xF)
1843         != IP_HEADER_VERSION_6) {
1844         return RSC_BYPASS;
1845     }
1846 
1847     /* Both options and protocol are checked: extension headers are not TCP */
1848     if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
1849         chain->stat.bypass_not_tcp++;
1850         return RSC_BYPASS;
1851     }
1852 
1853     ip_len = ntohs(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
1854     if (ip_len < sizeof(struct tcp_header) ||
1855         ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
1856                   - sizeof(struct ip6_header))) {
1857         chain->stat.ip_hacked++;
1858         return RSC_BYPASS;
1859     }
1860 
1861     /* Don't handle packets with ecn flag */
1862     if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
1863         chain->stat.ip_ecn++;
1864         return RSC_BYPASS;
1865     }
1866 
1867     return RSC_CANDIDATE;
1868 }
1869 
1870 static size_t virtio_net_rsc_receive6(VirtioNetRscChain *chain,
1871                                       NetClientState *nc,
1872                                       const uint8_t *buf, size_t size)
1873 {
1874     int32_t ret;
1875     uint16_t hdr_len;
1876     VirtioNetRscUnit unit;
1877 
1878     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
1880 
1881     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
1882         + sizeof(struct tcp_header))) {
1883         return virtio_net_do_receive(nc, buf, size);
1884     }
1885 
1886     virtio_net_rsc_extract_unit6(chain, buf, &unit);
1887     if (virtio_net_rsc_sanity_check6(chain, unit.ip, buf, size)
1888         != RSC_CANDIDATE) {
1889         return virtio_net_do_receive(nc, buf, size);
1890     }
1891 
1892     ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
1893     if (ret == RSC_BYPASS) {
1894         return virtio_net_do_receive(nc, buf, size);
1895     } else if (ret == RSC_FINAL) {
1896         return virtio_net_rsc_drain_flow(chain, nc, buf, size,
1897                 ((hdr_len + sizeof(struct eth_header)) + 8),
1898                 VIRTIO_NET_IP6_ADDR_SIZE,
1899                 hdr_len + sizeof(struct eth_header)
1900                 + sizeof(struct ip6_header));
1901     }
1902 
1903     return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
1904 }
1905 
1906 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
1907                                                       NetClientState *nc,
1908                                                       uint16_t proto)
1909 {
1910     VirtioNetRscChain *chain;
1911 
1912     if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
1913         return NULL;
1914     }
1915 
1916     QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
1917         if (chain->proto == proto) {
1918             return chain;
1919         }
1920     }
1921 
1922     chain = g_malloc(sizeof(*chain));
1923     chain->n = n;
1924     chain->proto = proto;
1925     if (proto == (uint16_t)ETH_P_IP) {
1926         chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
1927         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1928     } else {
1929         chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
1930         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1931     }
1932     chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
1933                                       virtio_net_rsc_purge, chain);
1934     memset(&chain->stat, 0, sizeof(chain->stat));
1935 
1936     QTAILQ_INIT(&chain->buffers);
1937     QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
1938 
1939     return chain;
1940 }
1941 
1942 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
1943                                       const uint8_t *buf,
1944                                       size_t size)
1945 {
1946     uint16_t proto;
1947     VirtioNetRscChain *chain;
1948     struct eth_header *eth;
1949     VirtIONet *n;
1950 
1951     n = qemu_get_nic_opaque(nc);
1952     if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
1953         return virtio_net_do_receive(nc, buf, size);
1954     }
1955 
1956     eth = (struct eth_header *)(buf + n->guest_hdr_len);
1957     proto = ntohs(eth->h_proto);
1958 
1959     chain = virtio_net_rsc_lookup_chain(n, nc, proto);
1960     if (chain) {
1961         chain->stat.received++;
1962         if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
1963             return virtio_net_rsc_receive4(chain, nc, buf, size);
1964         } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
1965             return virtio_net_rsc_receive6(chain, nc, buf, size);
1966         }
1967     }
1968     return virtio_net_do_receive(nc, buf, size);
1969 }
1970 
1971 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1972                                   size_t size)
1973 {
1974     VirtIONet *n = qemu_get_nic_opaque(nc);
1975     if ((n->rsc4_enabled || n->rsc6_enabled)) {
1976         return virtio_net_rsc_receive(nc, buf, size);
1977     } else {
1978         return virtio_net_do_receive(nc, buf, size);
1979     }
1980 }
1981 
1982 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1983 
1984 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1985 {
1986     VirtIONet *n = qemu_get_nic_opaque(nc);
1987     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1988     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1989 
1990     virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1991     virtio_notify(vdev, q->tx_vq);
1992 
1993     g_free(q->async_tx.elem);
1994     q->async_tx.elem = NULL;
1995 
1996     virtio_queue_set_notification(q->tx_vq, 1);
1997     virtio_net_flush_tx(q);
1998 }
1999 
2000 /* TX */
2001 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
2002 {
2003     VirtIONet *n = q->n;
2004     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2005     VirtQueueElement *elem;
2006     int32_t num_packets = 0;
2007     int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
2008     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2009         return num_packets;
2010     }
2011 
2012     if (q->async_tx.elem) {
2013         virtio_queue_set_notification(q->tx_vq, 0);
2014         return num_packets;
2015     }
2016 
2017     for (;;) {
2018         ssize_t ret;
2019         unsigned int out_num;
2020         struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
2021         struct virtio_net_hdr_mrg_rxbuf mhdr;
2022 
2023         elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2024         if (!elem) {
2025             break;
2026         }
2027 
2028         out_num = elem->out_num;
2029         out_sg = elem->out_sg;
2030         if (out_num < 1) {
2031             virtio_error(vdev, "virtio-net header not in first element");
2032             virtqueue_detach_element(q->tx_vq, elem, 0);
2033             g_free(elem);
2034             return -EINVAL;
2035         }
2036 
2037         if (n->has_vnet_hdr) {
2038             if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
2039                 n->guest_hdr_len) {
2040                 virtio_error(vdev, "virtio-net header incorrect");
2041                 virtqueue_detach_element(q->tx_vq, elem, 0);
2042                 g_free(elem);
2043                 return -EINVAL;
2044             }
2045             if (n->needs_vnet_hdr_swap) {
2046                 virtio_net_hdr_swap(vdev, (void *) &mhdr);
2047                 sg2[0].iov_base = &mhdr;
2048                 sg2[0].iov_len = n->guest_hdr_len;
2049                 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
2050                                    out_sg, out_num,
2051                                    n->guest_hdr_len, -1);
2052                 if (out_num == VIRTQUEUE_MAX_SIZE) {
2053                     goto drop;
2054                 }
2055                 out_num += 1;
2056                 out_sg = sg2;
2057             }
2058         }
2059         /*
2060          * If host wants to see the guest header as is, we can
2061          * pass it on unchanged. Otherwise, copy just the parts
2062          * that host is interested in.
2063          */
2064         assert(n->host_hdr_len <= n->guest_hdr_len);
2065         if (n->host_hdr_len != n->guest_hdr_len) {
2066             unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2067                                        out_sg, out_num,
2068                                        0, n->host_hdr_len);
2069             sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2070                              out_sg, out_num,
2071                              n->guest_hdr_len, -1);
2072             out_num = sg_num;
2073             out_sg = sg;
2074         }
2075 
2076         ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2077                                       out_sg, out_num, virtio_net_tx_complete);
2078         if (ret == 0) {
2079             virtio_queue_set_notification(q->tx_vq, 0);
2080             q->async_tx.elem = elem;
2081             return -EBUSY;
2082         }
2083 
2084 drop:
2085         virtqueue_push(q->tx_vq, elem, 0);
2086         virtio_notify(vdev, q->tx_vq);
2087         g_free(elem);
2088 
2089         if (++num_packets >= n->tx_burst) {
2090             break;
2091         }
2092     }
2093     return num_packets;
2094 }
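
/*
 * Shape of the header-shortening copy above (worked example, not extra
 * device code): with mergeable RX buffers the guest header is the
 * 12-byte struct virtio_net_hdr_mrg_rxbuf while a backend may only
 * accept the 10-byte struct virtio_net_hdr.  The first iov_copy() then
 * gathers bytes [0, 10) and the second gathers [12, end), so the
 * guest-only num_buffers field is silently dropped before the packet
 * reaches the backend.
 */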
2095 
2096 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2097 {
2098     VirtIONet *n = VIRTIO_NET(vdev);
2099     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2100 
2101     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2102         virtio_net_drop_tx_queue_data(vdev, vq);
2103         return;
2104     }
2105 
2106     /* This happens when the device was stopped but the VCPU wasn't. */
2107     if (!vdev->vm_running) {
2108         q->tx_waiting = 1;
2109         return;
2110     }
2111 
2112     if (q->tx_waiting) {
2113         virtio_queue_set_notification(vq, 1);
2114         timer_del(q->tx_timer);
2115         q->tx_waiting = 0;
2116         if (virtio_net_flush_tx(q) == -EINVAL) {
2117             return;
2118         }
2119     } else {
2120         timer_mod(q->tx_timer,
2121                        qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2122         q->tx_waiting = 1;
2123         virtio_queue_set_notification(vq, 0);
2124     }
2125 }
2126 
2127 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2128 {
2129     VirtIONet *n = VIRTIO_NET(vdev);
2130     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2131 
2132     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2133         virtio_net_drop_tx_queue_data(vdev, vq);
2134         return;
2135     }
2136 
2137     if (unlikely(q->tx_waiting)) {
2138         return;
2139     }
2140     q->tx_waiting = 1;
2141     /* This happens when the device was stopped but the VCPU wasn't. */
2142     if (!vdev->vm_running) {
2143         return;
2144     }
2145     virtio_queue_set_notification(vq, 0);
2146     qemu_bh_schedule(q->tx_bh);
2147 }
2148 
2149 static void virtio_net_tx_timer(void *opaque)
2150 {
2151     VirtIONetQueue *q = opaque;
2152     VirtIONet *n = q->n;
2153     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2154     /* This happens when the device was stopped but the BH wasn't. */
2155     if (!vdev->vm_running) {
2156         /* Make sure tx waiting is set, so we'll run when restarted. */
2157         assert(q->tx_waiting);
2158         return;
2159     }
2160 
2161     q->tx_waiting = 0;
2162 
2163     /* Just in case the driver is not ready any more */
2164     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2165         return;
2166     }
2167 
2168     virtio_queue_set_notification(q->tx_vq, 1);
2169     virtio_net_flush_tx(q);
2170 }
2171 
2172 static void virtio_net_tx_bh(void *opaque)
2173 {
2174     VirtIONetQueue *q = opaque;
2175     VirtIONet *n = q->n;
2176     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2177     int32_t ret;
2178 
2179     /* This happens when the device was stopped but the BH wasn't. */
2180     if (!vdev->vm_running) {
2181         /* Make sure tx waiting is set, so we'll run when restarted. */
2182         assert(q->tx_waiting);
2183         return;
2184     }
2185 
2186     q->tx_waiting = 0;
2187 
2188     /* Just in case the driver is not ready any more */
2189     if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
2190         return;
2191     }
2192 
2193     ret = virtio_net_flush_tx(q);
2194     if (ret == -EBUSY || ret == -EINVAL) {
2195         return; /* Notification re-enable handled by tx_complete or device
2196                  * broken */
2197     }
2198 
2199     /* If we flush a full burst of packets, assume there are
2200      * more coming and immediately reschedule */
2201     if (ret >= n->tx_burst) {
2202         qemu_bh_schedule(q->tx_bh);
2203         q->tx_waiting = 1;
2204         return;
2205     }
2206 
2207     /* If less than a full burst, re-enable notification and flush
2208      * anything that may have come in while we weren't looking.  If
2209      * we find something, assume the guest is still active and reschedule */
2210     virtio_queue_set_notification(q->tx_vq, 1);
2211     ret = virtio_net_flush_tx(q);
2212     if (ret == -EINVAL) {
2213         return;
2214     } else if (ret > 0) {
2215         virtio_queue_set_notification(q->tx_vq, 0);
2216         qemu_bh_schedule(q->tx_bh);
2217         q->tx_waiting = 1;
2218     }
2219 }
2220 
2221 static void virtio_net_add_queue(VirtIONet *n, int index)
2222 {
2223     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2224 
2225     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2226                                            virtio_net_handle_rx);
2227 
2228     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2229         n->vqs[index].tx_vq =
2230             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2231                              virtio_net_handle_tx_timer);
2232         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2233                                               virtio_net_tx_timer,
2234                                               &n->vqs[index]);
2235     } else {
2236         n->vqs[index].tx_vq =
2237             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2238                              virtio_net_handle_tx_bh);
2239         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
2240     }
2241 
2242     n->vqs[index].tx_waiting = 0;
2243     n->vqs[index].n = n;
2244 }
2245 
2246 static void virtio_net_del_queue(VirtIONet *n, int index)
2247 {
2248     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2249     VirtIONetQueue *q = &n->vqs[index];
2250     NetClientState *nc = qemu_get_subqueue(n->nic, index);
2251 
2252     qemu_purge_queued_packets(nc);
2253 
2254     virtio_del_queue(vdev, index * 2);
2255     if (q->tx_timer) {
2256         timer_del(q->tx_timer);
2257         timer_free(q->tx_timer);
2258         q->tx_timer = NULL;
2259     } else {
2260         qemu_bh_delete(q->tx_bh);
2261         q->tx_bh = NULL;
2262     }
2263     q->tx_waiting = 0;
2264     virtio_del_queue(vdev, index * 2 + 1);
2265 }
2266 
2267 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
2268 {
2269     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2270     int old_num_queues = virtio_get_num_queues(vdev);
2271     int new_num_queues = new_max_queues * 2 + 1;
2272     int i;
2273 
2274     assert(old_num_queues >= 3);
2275     assert(old_num_queues % 2 == 1);
2276 
2277     if (old_num_queues == new_num_queues) {
2278         return;
2279     }
2280 
2281     /*
2282      * We always need to remove and add ctrl vq if
2283      * old_num_queues != new_num_queues. Remove ctrl_vq first,
2284      * and then we only enter one of the following two loops.
2285      */
2286     virtio_del_queue(vdev, old_num_queues - 1);
2287 
2288     for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
2289         /* new_num_queues < old_num_queues */
2290         virtio_net_del_queue(n, i / 2);
2291     }
2292 
2293     for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
2294         /* new_num_queues > old_num_queues */
2295         virtio_net_add_queue(n, i / 2);
2296     }
2297 
2298     /* add ctrl_vq last */
2299     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2300 }
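
/*
 * Virtqueue index layout assumed by the loops above (example for
 * max_queues == 2): index 0 = rx0, 1 = tx0, 2 = rx1, 3 = tx1, with the
 * control queue always last.  Pair i occupies indices 2 * i and
 * 2 * i + 1, which is why the loops step by 2 and recover the pair
 * number as "i / 2".
 */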
2301 
2302 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
2303 {
2304     int max = multiqueue ? n->max_queues : 1;
2305 
2306     n->multiqueue = multiqueue;
2307     virtio_net_change_num_queues(n, max);
2308 
2309     virtio_net_set_queues(n);
2310 }
2311 
2312 static int virtio_net_post_load_device(void *opaque, int version_id)
2313 {
2314     VirtIONet *n = opaque;
2315     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2316     int i, link_down;
2317 
2318     trace_virtio_net_post_load_device();
2319     virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
2320                                virtio_vdev_has_feature(vdev,
2321                                                        VIRTIO_F_VERSION_1));
2322 
2323     /* MAC_TABLE_ENTRIES may be different from the saved image */
2324     if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
2325         n->mac_table.in_use = 0;
2326     }
2327 
2328     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
2329         n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
2330     }
2331 
2332     if (peer_has_vnet_hdr(n)) {
2333         virtio_net_apply_guest_offloads(n);
2334     }
2335 
2336     virtio_net_set_queues(n);
2337 
2338     /* Find the first multicast entry (I/G bit set) in the saved MAC filter */
2339     for (i = 0; i < n->mac_table.in_use; i++) {
2340         if (n->mac_table.macs[i * ETH_ALEN] & 1) {
2341             break;
2342         }
2343     }
2344     n->mac_table.first_multi = i;
2345 
2346     /* nc.link_down can't be migrated, so infer link_down from the
2347      * link status bit in n->status */
2348     link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
2349     for (i = 0; i < n->max_queues; i++) {
2350         qemu_get_subqueue(n->nic, i)->link_down = link_down;
2351     }
2352 
2353     if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
2354         virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
2355         qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2356                                   QEMU_CLOCK_VIRTUAL,
2357                                   virtio_net_announce_timer, n);
2358         if (n->announce_timer.round) {
2359             timer_mod(n->announce_timer.tm,
2360                       qemu_clock_get_ms(n->announce_timer.type));
2361         } else {
2362             qemu_announce_timer_del(&n->announce_timer);
2363         }
2364     }
2365 
2366     return 0;
2367 }
2368 
2369 /* tx_waiting field of a VirtIONetQueue */
2370 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
2371     .name = "virtio-net-queue-tx_waiting",
2372     .fields = (VMStateField[]) {
2373         VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
2374         VMSTATE_END_OF_LIST()
2375     },
2376 };
2377 
2378 static bool max_queues_gt_1(void *opaque, int version_id)
2379 {
2380     return VIRTIO_NET(opaque)->max_queues > 1;
2381 }
2382 
2383 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
2384 {
2385     return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
2386                                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
2387 }
2388 
2389 static bool mac_table_fits(void *opaque, int version_id)
2390 {
2391     return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
2392 }
2393 
2394 static bool mac_table_doesnt_fit(void *opaque, int version_id)
2395 {
2396     return !mac_table_fits(opaque, version_id);
2397 }
2398 
2399 /* This temporary type is shared by all the WITH_TMP methods
2400  * although only some fields are used by each.
2401  */
2402 struct VirtIONetMigTmp {
2403     VirtIONet      *parent;
2404     VirtIONetQueue *vqs_1;
2405     uint16_t        curr_queues_1;
2406     uint8_t         has_ufo;
2407     uint32_t        has_vnet_hdr;
2408 };
2409 
2410 /* The 2nd and subsequent tx_waiting flags are loaded later than
2411  * the 1st entry in the queues and only if there's more than one
2412  * entry.  We use the tmp mechanism to calculate a temporary
2413  * pointer and count and also validate the count.
2414  */
2415 
2416 static int virtio_net_tx_waiting_pre_save(void *opaque)
2417 {
2418     struct VirtIONetMigTmp *tmp = opaque;
2419 
2420     tmp->vqs_1 = tmp->parent->vqs + 1;
2421     tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
2422     if (tmp->parent->curr_queues == 0) {
2423         tmp->curr_queues_1 = 0;
2424     }
2425 
2426     return 0;
2427 }
2428 
2429 static int virtio_net_tx_waiting_pre_load(void *opaque)
2430 {
2431     struct VirtIONetMigTmp *tmp = opaque;
2432 
2433     /* Reuse the pointer setup from save */
2434     virtio_net_tx_waiting_pre_save(opaque);
2435 
2436     if (tmp->parent->curr_queues > tmp->parent->max_queues) {
2437         error_report("virtio-net: curr_queues %x > max_queues %x",
2438             tmp->parent->curr_queues, tmp->parent->max_queues);
2439 
2440         return -EINVAL;
2441     }
2442 
2443     return 0; /* all good */
2444 }
2445 
2446 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
2447     .name      = "virtio-net-tx_waiting",
2448     .pre_load  = virtio_net_tx_waiting_pre_load,
2449     .pre_save  = virtio_net_tx_waiting_pre_save,
2450     .fields    = (VMStateField[]) {
2451         VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
2452                                      curr_queues_1,
2453                                      vmstate_virtio_net_queue_tx_waiting,
2454                                      struct VirtIONetQueue),
2455         VMSTATE_END_OF_LIST()
2456     },
2457 };
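
/*
 * Migration layout example (annotation): with curr_queues == 3 the main
 * device section below saves vqs[0].tx_waiting directly, while this
 * subsection saves vqs[1] and vqs[2] through the temporary
 * vqs_1/curr_queues_1 pair; a single-queue stream therefore carries no
 * extra tx_waiting entries at all.
 */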
2458 
2459 /* the 'has_ufo' flag is just tested; if the incoming stream has the
2460  * flag set we need to check that we have it
2461  */
2462 static int virtio_net_ufo_post_load(void *opaque, int version_id)
2463 {
2464     struct VirtIONetMigTmp *tmp = opaque;
2465 
2466     if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
2467         error_report("virtio-net: saved image requires TUN_F_UFO support");
2468         return -EINVAL;
2469     }
2470 
2471     return 0;
2472 }
2473 
2474 static int virtio_net_ufo_pre_save(void *opaque)
2475 {
2476     struct VirtIONetMigTmp *tmp = opaque;
2477 
2478     tmp->has_ufo = tmp->parent->has_ufo;
2479 
2480     return 0;
2481 }
2482 
2483 static const VMStateDescription vmstate_virtio_net_has_ufo = {
2484     .name      = "virtio-net-ufo",
2485     .post_load = virtio_net_ufo_post_load,
2486     .pre_save  = virtio_net_ufo_pre_save,
2487     .fields    = (VMStateField[]) {
2488         VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
2489         VMSTATE_END_OF_LIST()
2490     },
2491 };
2492 
2493 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
2494  * flag set we need to check that we have it
2495  */
2496 static int virtio_net_vnet_post_load(void *opaque, int version_id)
2497 {
2498     struct VirtIONetMigTmp *tmp = opaque;
2499 
2500     if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
2501         error_report("virtio-net: saved image requires vnet_hdr=on");
2502         return -EINVAL;
2503     }
2504 
2505     return 0;
2506 }
2507 
2508 static int virtio_net_vnet_pre_save(void *opaque)
2509 {
2510     struct VirtIONetMigTmp *tmp = opaque;
2511 
2512     tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
2513 
2514     return 0;
2515 }
2516 
2517 static const VMStateDescription vmstate_virtio_net_has_vnet = {
2518     .name      = "virtio-net-vnet",
2519     .post_load = virtio_net_vnet_post_load,
2520     .pre_save  = virtio_net_vnet_pre_save,
2521     .fields    = (VMStateField[]) {
2522         VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
2523         VMSTATE_END_OF_LIST()
2524     },
2525 };
2526 
2527 static const VMStateDescription vmstate_virtio_net_device = {
2528     .name = "virtio-net-device",
2529     .version_id = VIRTIO_NET_VM_VERSION,
2530     .minimum_version_id = VIRTIO_NET_VM_VERSION,
2531     .post_load = virtio_net_post_load_device,
2532     .fields = (VMStateField[]) {
2533         VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
2534         VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
2535                                vmstate_virtio_net_queue_tx_waiting,
2536                                VirtIONetQueue),
2537         VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
2538         VMSTATE_UINT16(status, VirtIONet),
2539         VMSTATE_UINT8(promisc, VirtIONet),
2540         VMSTATE_UINT8(allmulti, VirtIONet),
2541         VMSTATE_UINT32(mac_table.in_use, VirtIONet),
2542 
2543         /* Guarded pair: if it fits we load it, else we throw it away
2544          * - this can happen if the source has a larger MAC table; post-load
2545          * sets flags in this case.
2546          */
2547         VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
2548                                 0, mac_table_fits, mac_table.in_use,
2549                                  ETH_ALEN),
2550         VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
2551                                      mac_table.in_use, ETH_ALEN),
2552 
2553         /* Note: this is an array of uint32_t that has always been saved as
2554          * a raw buffer; mind the endianness: it is actually used as a
2555          * bitmap built on the uint32_t units.
2556          */
2557         VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
2558         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2559                          vmstate_virtio_net_has_vnet),
2560         VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
2561         VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
2562         VMSTATE_UINT8(alluni, VirtIONet),
2563         VMSTATE_UINT8(nomulti, VirtIONet),
2564         VMSTATE_UINT8(nouni, VirtIONet),
2565         VMSTATE_UINT8(nobcast, VirtIONet),
2566         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2567                          vmstate_virtio_net_has_ufo),
2568         VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
2569                             vmstate_info_uint16_equal, uint16_t),
2570         VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
2571         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2572                          vmstate_virtio_net_tx_waiting),
2573         VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
2574                             has_ctrl_guest_offloads),
2575         VMSTATE_END_OF_LIST()
2576     },
2577 };
2578 
2579 static NetClientInfo net_virtio_info = {
2580     .type = NET_CLIENT_DRIVER_NIC,
2581     .size = sizeof(NICState),
2582     .can_receive = virtio_net_can_receive,
2583     .receive = virtio_net_receive,
2584     .link_status_changed = virtio_net_set_link_status,
2585     .query_rx_filter = virtio_net_query_rxfilter,
2586     .announce = virtio_net_announce,
2587 };
2588 
2589 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
2590 {
2591     VirtIONet *n = VIRTIO_NET(vdev);
2592     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
2593     assert(n->vhost_started);
2594     return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
2595 }
2596 
2597 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
2598                                            bool mask)
2599 {
2600     VirtIONet *n = VIRTIO_NET(vdev);
2601     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
2602     assert(n->vhost_started);
2603     vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
2604                              vdev, idx, mask);
2605 }
2606 
2607 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
2608 {
2609     virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
2610 
2611     n->config_size = virtio_feature_get_config_size(feature_sizes,
2612                                                     host_features);
2613 }
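
/*
 * Editor's note: virtio_feature_get_config_size() returns the largest
 * config-field end offset among the enabled feature bits, so the
 * visible config space only grows as far as the negotiated features
 * require.  VIRTIO_NET_F_MAC is added to the local copy of the feature
 * bits because the config space always starts with the MAC field, even
 * when the feature was disabled on the command line.
 */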
2614 
2615 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
2616                                    const char *type)
2617 {
2618     /*
2619      * The name can be NULL; the netclient name will then default to type.x.
2620      */
2621     assert(type != NULL);
2622 
2623     g_free(n->netclient_name);
2624     g_free(n->netclient_type);
2625     n->netclient_name = g_strdup(name);
2626     n->netclient_type = g_strdup(type);
2627 }
2628 
2629 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
2630 {
2631     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2632     VirtIONet *n = VIRTIO_NET(dev);
2633     NetClientState *nc;
2634     int i;
2635 
2636     if (n->net_conf.mtu) {
2637         n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
2638     }
2639 
2640     if (n->net_conf.duplex_str) {
2641         if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
2642             n->net_conf.duplex = DUPLEX_HALF;
2643         } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
2644             n->net_conf.duplex = DUPLEX_FULL;
2645         } else {
2646             error_setg(errp, "'duplex' must be 'half' or 'full'");
                 return;
2647         }
2648         n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
2649     } else {
2650         n->net_conf.duplex = DUPLEX_UNKNOWN;
2651     }
2652 
2653     if (n->net_conf.speed < SPEED_UNKNOWN) {
2654         error_setg(errp, "'speed' must be between 0 and INT_MAX");
             return;
2655     } else if (n->net_conf.speed >= 0) {
2656         n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
2657     }
2658 
2659     virtio_net_set_config_size(n, n->host_features);
2660     virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
2661 
2662     /*
2663      * We set a lower limit on RX queue size to what it always was.
2664      * Guests that want a smaller ring can always resize it without
2665      * help from us (using virtio 1 and up).
2666      */
2667     if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
2668         n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
2669         !is_power_of_2(n->net_conf.rx_queue_size)) {
2670         error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
2671                    "must be a power of 2 between %d and %d.",
2672                    n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
2673                    VIRTQUEUE_MAX_SIZE);
2674         virtio_cleanup(vdev);
2675         return;
2676     }
2677 
2678     if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
2679         n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
2680         !is_power_of_2(n->net_conf.tx_queue_size)) {
2681         error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
2682                    "must be a power of 2 between %d and %d",
2683                    n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
2684                    VIRTQUEUE_MAX_SIZE);
2685         virtio_cleanup(vdev);
2686         return;
2687     }
2688 
2689     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
2690     if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
2691         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
2692                    "must be a positive integer less than %d.",
2693                    n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
2694         virtio_cleanup(vdev);
2695         return;
2696     }
2697     n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
2698     n->curr_queues = 1;
2699     n->tx_timeout = n->net_conf.txtimer;
2700 
2701     if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
2702                        && strcmp(n->net_conf.tx, "bh")) {
2703         warn_report("virtio-net: "
2704                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
2705                     n->net_conf.tx);
2706         error_printf("Defaulting to \"bh\"");
2707     }
2708 
2709     n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
2710                                     n->net_conf.tx_queue_size);
2711 
2712     for (i = 0; i < n->max_queues; i++) {
2713         virtio_net_add_queue(n, i);
2714     }
2715 
2716     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2717     qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
2718     memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
2719     n->status = VIRTIO_NET_S_LINK_UP;
2720     qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2721                               QEMU_CLOCK_VIRTUAL,
2722                               virtio_net_announce_timer, n);
2723     n->announce_timer.round = 0;
2724 
2725     if (n->netclient_type) {
2726         /*
2727          * This happens when virtio_net_set_netclient_name has been called.
2728          */
2729         n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2730                               n->netclient_type, n->netclient_name, n);
2731     } else {
2732         n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2733                               object_get_typename(OBJECT(dev)), dev->id, n);
2734     }
2735 
2736     peer_test_vnet_hdr(n);
2737     if (peer_has_vnet_hdr(n)) {
2738         for (i = 0; i < n->max_queues; i++) {
2739             qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
2740         }
2741         n->host_hdr_len = sizeof(struct virtio_net_hdr);
2742     } else {
2743         n->host_hdr_len = 0;
2744     }
2745 
2746     qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
2747 
2748     n->vqs[0].tx_waiting = 0;
2749     n->tx_burst = n->net_conf.txburst;
2750     virtio_net_set_mrg_rx_bufs(n, 0, 0);
2751     n->promisc = 1; /* for compatibility */
2752 
2753     n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
2754 
2755     n->vlans = g_malloc0(MAX_VLAN >> 3);
2756 
2757     nc = qemu_get_queue(n->nic);
2758     nc->rxfilter_notify_enabled = 1;
2759 
2760     QTAILQ_INIT(&n->rsc_chains);
2761     n->qdev = dev;
2762 }
2763 
2764 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
2765 {
2766     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2767     VirtIONet *n = VIRTIO_NET(dev);
2768     int i, max_queues;
2769 
2770     /* This will stop vhost backend if appropriate. */
2771     virtio_net_set_status(vdev, 0);
2772 
2773     g_free(n->netclient_name);
2774     n->netclient_name = NULL;
2775     g_free(n->netclient_type);
2776     n->netclient_type = NULL;
2777 
2778     g_free(n->mac_table.macs);
2779     g_free(n->vlans);
2780 
2781     max_queues = n->multiqueue ? n->max_queues : 1;
2782     for (i = 0; i < max_queues; i++) {
2783         virtio_net_del_queue(n, i);
2784     }
2785 
2786     qemu_announce_timer_del(&n->announce_timer);
2787     g_free(n->vqs);
2788     qemu_del_nic(n->nic);
2789     virtio_net_rsc_cleanup(n);
2790     virtio_cleanup(vdev);
2791 }
2792 
2793 static void virtio_net_instance_init(Object *obj)
2794 {
2795     VirtIONet *n = VIRTIO_NET(obj);
2796 
2797     /*
2798      * The default config_size is sizeof(struct virtio_net_config).
2799      * Can be overridden with virtio_net_set_config_size.
2800      */
2801     n->config_size = sizeof(struct virtio_net_config);
2802     device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2803                                   "bootindex", "/ethernet-phy@0",
2804                                   DEVICE(n), NULL);
2805 }
2806 
2807 static int virtio_net_pre_save(void *opaque)
2808 {
2809     VirtIONet *n = opaque;
2810 
2811     /* At this point, the backend must be stopped, otherwise it might
2812      * keep writing to memory. */
2813     assert(!n->vhost_started);
2814 
2815     return 0;
2816 }
2817 
2818 static const VMStateDescription vmstate_virtio_net = {
2819     .name = "virtio-net",
2820     .minimum_version_id = VIRTIO_NET_VM_VERSION,
2821     .version_id = VIRTIO_NET_VM_VERSION,
2822     .fields = (VMStateField[]) {
2823         VMSTATE_VIRTIO_DEVICE,
2824         VMSTATE_END_OF_LIST()
2825     },
2826     .pre_save = virtio_net_pre_save,
2827 };
2828 
2829 static Property virtio_net_properties[] = {
2830     DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
2831                     VIRTIO_NET_F_CSUM, true),
2832     DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
2833                     VIRTIO_NET_F_GUEST_CSUM, true),
2834     DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2835     DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
2836                     VIRTIO_NET_F_GUEST_TSO4, true),
2837     DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
2838                     VIRTIO_NET_F_GUEST_TSO6, true),
2839     DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
2840                     VIRTIO_NET_F_GUEST_ECN, true),
2841     DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
2842                     VIRTIO_NET_F_GUEST_UFO, true),
2843     DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
2844                     VIRTIO_NET_F_GUEST_ANNOUNCE, true),
2845     DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
2846                     VIRTIO_NET_F_HOST_TSO4, true),
2847     DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
2848                     VIRTIO_NET_F_HOST_TSO6, true),
2849     DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
2850                     VIRTIO_NET_F_HOST_ECN, true),
2851     DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
2852                     VIRTIO_NET_F_HOST_UFO, true),
2853     DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
2854                     VIRTIO_NET_F_MRG_RXBUF, true),
2855     DEFINE_PROP_BIT64("status", VirtIONet, host_features,
2856                     VIRTIO_NET_F_STATUS, true),
2857     DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
2858                     VIRTIO_NET_F_CTRL_VQ, true),
2859     DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
2860                     VIRTIO_NET_F_CTRL_RX, true),
2861     DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
2862                     VIRTIO_NET_F_CTRL_VLAN, true),
2863     DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
2864                     VIRTIO_NET_F_CTRL_RX_EXTRA, true),
2865     DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
2866                     VIRTIO_NET_F_CTRL_MAC_ADDR, true),
2867     DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
2868                     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
2869     DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2870     DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
2871                     VIRTIO_NET_F_RSC_EXT, false),
2872     DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
2873                        VIRTIO_NET_RSC_DEFAULT_INTERVAL),
2874     DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2875     DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
2876                        TX_TIMER_INTERVAL),
2877     DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2878     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
2879     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2880                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
2881     DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
2882                        VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
2883     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
2884     DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
2885                      true),
2886     DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
2887     DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
2888     DEFINE_PROP_END_OF_LIST(),
2889 };
2890 
2891 static void virtio_net_class_init(ObjectClass *klass, void *data)
2892 {
2893     DeviceClass *dc = DEVICE_CLASS(klass);
2894     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2895 
2896     dc->props = virtio_net_properties;
2897     dc->vmsd = &vmstate_virtio_net;
2898     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2899     vdc->realize = virtio_net_device_realize;
2900     vdc->unrealize = virtio_net_device_unrealize;
2901     vdc->get_config = virtio_net_get_config;
2902     vdc->set_config = virtio_net_set_config;
2903     vdc->get_features = virtio_net_get_features;
2904     vdc->set_features = virtio_net_set_features;
2905     vdc->bad_features = virtio_net_bad_features;
2906     vdc->reset = virtio_net_reset;
2907     vdc->set_status = virtio_net_set_status;
2908     vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2909     vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2910     vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
2911     vdc->vmsd = &vmstate_virtio_net_device;
2912 }
2913 
2914 static const TypeInfo virtio_net_info = {
2915     .name = TYPE_VIRTIO_NET,
2916     .parent = TYPE_VIRTIO_DEVICE,
2917     .instance_size = sizeof(VirtIONet),
2918     .instance_init = virtio_net_instance_init,
2919     .class_init = virtio_net_class_init,
2920 };
2921 
2922 static void virtio_register_types(void)
2923 {
2924     type_register_static(&virtio_net_info);
2925 }
2926 
2927 type_init(virtio_register_types)
2928