xref: /openbmc/qemu/hw/net/virtio-net.c (revision 2deec9ab7d25d7cd8f57033bd0421c1f9f28d905)
1 /*
2  * Virtio Network Device
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/atomic.h"
16 #include "qemu/iov.h"
17 #include "qemu/log.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/module.h"
20 #include "hw/virtio/virtio.h"
21 #include "net/net.h"
22 #include "net/checksum.h"
23 #include "net/tap.h"
24 #include "qemu/error-report.h"
25 #include "qemu/timer.h"
26 #include "qemu/option.h"
27 #include "qemu/option_int.h"
28 #include "qemu/config-file.h"
29 #include "qobject/qdict.h"
30 #include "hw/virtio/virtio-net.h"
31 #include "net/vhost_net.h"
32 #include "net/announce.h"
33 #include "hw/virtio/virtio-bus.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-events-net.h"
36 #include "hw/qdev-properties.h"
37 #include "qapi/qapi-types-migration.h"
38 #include "qapi/qapi-events-migration.h"
39 #include "hw/virtio/virtio-access.h"
40 #include "migration/misc.h"
41 #include "standard-headers/linux/ethtool.h"
42 #include "system/system.h"
43 #include "system/replay.h"
44 #include "trace.h"
45 #include "monitor/qdev.h"
46 #include "monitor/monitor.h"
47 #include "hw/pci/pci_device.h"
48 #include "net_rx_pkt.h"
49 #include "hw/virtio/vhost.h"
50 #include "system/qtest.h"
51 
52 #define VIRTIO_NET_VM_VERSION    11
53 
54 /* previously fixed value */
55 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
56 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
57 
58 /* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */
59 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
60 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
61 
62 #define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */
63 
64 #define VIRTIO_NET_TCP_FLAG         0x3F
65 #define VIRTIO_NET_TCP_HDR_LENGTH   0xF000
66 
67 /* IPv4 max payload, 16 bits in the header */
68 #define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
69 #define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
70 
71 /* header length value in ip header without option */
72 #define VIRTIO_NET_IP4_HEADER_LENGTH 5
73 
74 #define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
75 #define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
76 
77 /* Purge coalesced packets timer interval, This value affects the performance
78    a lot, and should be tuned carefully, '300000'(300us) is the recommended
79    value to pass the WHQL test, '50000' can gain 2x netperf throughput with
80    tso/gso/gro 'off'. */
81 #define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
82 
83 #define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
84                                          VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
85                                          VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
86                                          VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
87                                          VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
88                                          VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
89                                          VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
90                                          VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
91                                          VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
92 
/*
 * Feature-dependent config space sizing: when a feature bit in .flags is
 * negotiated, the visible config space must extend at least to .end
 * (offset one past the named field).  Terminated by the empty entry.
 */
static const VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1ULL << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
     .end = endof(struct virtio_net_config, duplex)},
    /* Either RSS or hash reporting exposes the hash-related fields. */
    {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
     .end = endof(struct virtio_net_config, supported_hash_types)},
    {}
};
108 
/* Bounds and per-feature growth rules for the device config space size. */
static const VirtIOConfigSizeParams cfg_size_params = {
    .min_size = endof(struct virtio_net_config, mac),
    .max_size = sizeof(struct virtio_net_config),
    .feature_sizes = feature_sizes
};
114 
115 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
116 {
117     VirtIONet *n = qemu_get_nic_opaque(nc);
118 
119     return &n->vqs[nc->queue_index];
120 }
121 
/*
 * Map a virtqueue index to its queue-pair number: RX queue 2*q and
 * TX queue 2*q + 1 both belong to pair q.
 */
static int vq2q(int queue_index)
{
    int pair = queue_index / 2;

    return pair;
}
126 
127 static void flush_or_purge_queued_packets(NetClientState *nc)
128 {
129     if (!nc->peer) {
130         return;
131     }
132 
133     qemu_flush_or_purge_queued_packets(nc->peer, true);
134     assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
135 }
136 
137 /* TODO
138  * - we could suppress RX interrupt if we were so inclined.
139  */
140 
/*
 * Fill @config with the current contents of the device config space.
 *
 * The struct is built locally (virtio_st*_p handle guest endianness) and
 * then truncated to n->config_size.  For a vhost-vdpa peer, the backend's
 * view of the config is fetched and copied on top, except that an all-zero
 * MAC reported by the backend is replaced with the QEMU-configured one.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;
    NetClientState *nc = qemu_get_queue(n->nic);
    static const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };

    int ret = 0;
    memset(&netcfg, 0 , sizeof(struct virtio_net_config));
    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
    netcfg.duplex = n->net_conf.duplex;
    netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
    /* A real indirection table is only advertised when the host offers RSS */
    virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
                 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
                 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
    virtio_stl_p(vdev, &netcfg.supported_hash_types,
                 VIRTIO_NET_RSS_SUPPORTED_HASHES);
    memcpy(config, &netcfg, n->config_size);

    /*
     * Is this VDPA? No peer means not VDPA: there's no way to
     * disconnect/reconnect a VDPA peer.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg,
                                   n->config_size);
        if (ret == -1) {
            /* Backend read failed: keep the locally-built config copied above */
            return;
        }

        /*
         * Some NIC/kernel combinations present 0 as the mac address.  As that
         * is not a legal address, try to proceed with the address from the
         * QEMU command line in the hope that the address has been configured
         * correctly elsewhere - just not reported by the device.
         */
        if (memcmp(&netcfg.mac, &zero, sizeof(zero)) == 0) {
            info_report("Zero hardware mac address detected. Ignoring.");
            memcpy(netcfg.mac, n->mac, ETH_ALEN);
        }

        /* Re-assert a pending ANNOUNCE bit the backend doesn't know about */
        netcfg.status |= virtio_tswap16(vdev,
                                        n->status & VIRTIO_NET_S_ANNOUNCE);
        memcpy(config, &netcfg, n->config_size);
    }
}
191 
192 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
193 {
194     VirtIONet *n = VIRTIO_NET(vdev);
195     struct virtio_net_config netcfg = {};
196     NetClientState *nc = qemu_get_queue(n->nic);
197 
198     memcpy(&netcfg, config, n->config_size);
199 
200     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
201         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
202         memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
203         memcpy(n->mac, netcfg.mac, ETH_ALEN);
204         qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
205     }
206 
207     /*
208      * Is this VDPA? No peer means not VDPA: there's no way to
209      * disconnect/reconnect a VDPA peer.
210      */
211     if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
212         vhost_net_set_config(get_vhost_net(nc->peer),
213                              (uint8_t *)&netcfg, 0, n->config_size,
214                              VHOST_SET_CONFIG_TYPE_FRONTEND);
215       }
216 }
217 
218 static bool virtio_net_started(VirtIONet *n, uint8_t status)
219 {
220     VirtIODevice *vdev = VIRTIO_DEVICE(n);
221     return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
222         (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
223 }
224 
/*
 * Raise VIRTIO_NET_S_ANNOUNCE in the device status and kick a config
 * interrupt so the guest notices and can send a self-announcement.
 */
static void virtio_net_announce_notify(VirtIONet *net)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(net);
    trace_virtio_net_announce_notify();

    net->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
233 
/*
 * Announce-timer callback: consume one announcement round and re-raise
 * the ANNOUNCE status bit to prompt another guest self-announcement.
 */
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    trace_virtio_net_announce_timer(n->announce_timer.round);

    n->announce_timer.round--;
    virtio_net_announce_notify(n);
}
242 
/*
 * NetClientInfo announce hook: ask the guest to announce itself, provided
 * it negotiated GUEST_ANNOUNCE + CTRL_VQ and the migration announce timer
 * is not already mid-sequence.
 */
static void virtio_net_announce(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /*
     * Make sure the virtio migration announcement timer isn't running
     * If it is, let it trigger announcement so that we do not cause
     * confusion.
     */
    if (n->announce_timer.round) {
        return;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
            virtio_net_announce_notify(n);
    }
}
262 
/*
 * Start or stop the vhost backend to match the desired device @status.
 *
 * No-op when there is no vhost backend, or when the backend is already in
 * the wanted state.  Starting is refused if the device must swap vnet
 * headers in software (vhost cannot do that) or if a negotiated MTU is
 * rejected by the backend; in those cases we fall back to userspace
 * virtio.  The control virtqueue count (cvq) is the NCS beyond the data
 * queue pairs, present only when CTRL_VQ was negotiated.
 */
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
              n->max_ncs - n->max_queue_pairs : 0;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    /* Already in the desired state? (started and link up vs. vhost running) */
    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0;  i < queue_pairs; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        /* Set the flag first: vhost_net_start may rely on it being visible */
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq);
        n->vhost_started = 0;
    }
}
322 
323 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
324                                           NetClientState *peer,
325                                           bool enable)
326 {
327     if (virtio_is_big_endian(vdev)) {
328         return qemu_set_vnet_be(peer, enable);
329     } else {
330         return qemu_set_vnet_le(peer, enable);
331     }
332 }
333 
/*
 * Apply the vnet-header endianness setting to all @queue_pairs peers.
 * If enabling fails on any peer, the peers already configured are rolled
 * back.  Returns true when enabling was requested but the backend cannot
 * do it - i.e. the device itself must swap vnet headers.
 */
static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queue_pairs, bool enable)
{
    int i;

    for (i = 0; i < queue_pairs; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            /* Undo the peers that were already switched */
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}
352 
/*
 * Track device start/stop transitions and keep the backend's vnet header
 * endianness in sync; records in needs_vnet_hdr_swap whether the device
 * must byteswap headers itself.
 */
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fallback onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = n->has_vnet_hdr &&
                                 virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queue_pairs, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false);
    }
}
376 
377 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
378 {
379     unsigned int dropped = virtqueue_drop_all(vq);
380     if (dropped) {
381         virtio_notify(vdev, vq);
382     }
383 }
384 
/*
 * Device status change handler.  Propagates @status to the endianness
 * setup and vhost backend, then walks every queue pair: started queues
 * get their pending RX flushed and any waiting TX rescheduled; stopped
 * queues have their TX timer/bottom-half cancelled, and - if the link is
 * down while the driver is otherwise running - their queued TX data
 * dropped so the rings don't fill up.  Always returns 0.
 */
static int virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queue_pairs; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        /* Queues beyond curr_queue_pairs (or any but 0 without MQ) are off */
        if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        /* "started" here means emulated in QEMU, not offloaded to vhost */
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                replay_bh_schedule_event(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                vdev->vm_running) {
                /* if tx is waiting we are likely have some packets in tx queue
                 * and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
    return 0;
}
442 
443 static void virtio_net_set_link_status(NetClientState *nc)
444 {
445     VirtIONet *n = qemu_get_nic_opaque(nc);
446     VirtIODevice *vdev = VIRTIO_DEVICE(n);
447     uint16_t old_status = n->status;
448 
449     if (nc->link_down)
450         n->status &= ~VIRTIO_NET_S_LINK_UP;
451     else
452         n->status |= VIRTIO_NET_S_LINK_UP;
453 
454     if (n->status != old_status)
455         virtio_notify_config(vdev);
456 
457     virtio_net_set_status(vdev, vdev->status);
458 }
459 
/*
 * Emit the NIC_RX_FILTER_CHANGED QMP event for this device.  Notification
 * is one-shot: it disarms itself until the next query-rx-filter re-enables
 * it, so a burst of filter changes produces a single event.
 */
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        char *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(n->netclient_name, path);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
473 
/*
 * Build a QAPI intList of every VLAN id set in the n->vlans bitmap
 * (32 ids per word, bit j of word i is id (i << 5) + j).  Returns NULL
 * when no VLAN is enabled; the caller owns the returned list.
 */
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        /* Skip the inner scan as soon as the word has no bits left */
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                QAPI_LIST_PREPEND(list, (i << 5) + j);
            }
        }
    }

    return list;
}
490 
491 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
492 {
493     VirtIONet *n = qemu_get_nic_opaque(nc);
494     VirtIODevice *vdev = VIRTIO_DEVICE(n);
495     RxFilterInfo *info;
496     strList *str_list;
497     int i;
498 
499     info = g_malloc0(sizeof(*info));
500     info->name = g_strdup(nc->name);
501     info->promiscuous = n->promisc;
502 
503     if (n->nouni) {
504         info->unicast = RX_STATE_NONE;
505     } else if (n->alluni) {
506         info->unicast = RX_STATE_ALL;
507     } else {
508         info->unicast = RX_STATE_NORMAL;
509     }
510 
511     if (n->nomulti) {
512         info->multicast = RX_STATE_NONE;
513     } else if (n->allmulti) {
514         info->multicast = RX_STATE_ALL;
515     } else {
516         info->multicast = RX_STATE_NORMAL;
517     }
518 
519     info->broadcast_allowed = n->nobcast;
520     info->multicast_overflow = n->mac_table.multi_overflow;
521     info->unicast_overflow = n->mac_table.uni_overflow;
522 
523     info->main_mac = qemu_mac_strdup_printf(n->mac);
524 
525     str_list = NULL;
526     for (i = 0; i < n->mac_table.first_multi; i++) {
527         QAPI_LIST_PREPEND(str_list,
528                       qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
529     }
530     info->unicast_table = str_list;
531 
532     str_list = NULL;
533     for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
534         QAPI_LIST_PREPEND(str_list,
535                       qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
536     }
537     info->multicast_table = str_list;
538     info->vlan_table = get_vlan_table(n);
539 
540     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
541         info->vlan = RX_STATE_ALL;
542     } else if (!info->vlan_table) {
543         info->vlan = RX_STATE_NONE;
544     } else {
545         info->vlan = RX_STATE_NORMAL;
546     }
547 
548     /* enable event notification after query */
549     nc->rxfilter_notify_enabled = 1;
550 
551     return info;
552 }
553 
/*
 * Per-virtqueue reset handler: stop the corresponding vhost virtqueue
 * (tap backends only) and flush/purge packets queued toward the peer.
 * Control virtqueue (index >= 2 * max_queue_pairs) is ignored here.
 */
static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;

    /* validate queue_index and skip for cvq */
    if (queue_index >= n->max_queue_pairs * 2) {
        return;
    }

    nc = qemu_get_subqueue(n->nic, vq2q(queue_index));

    if (!nc->peer) {
        return;
    }

    if (get_vhost_net(nc->peer) &&
        nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
        vhost_net_virtqueue_reset(vdev, nc, queue_index);
    }

    flush_or_purge_queued_packets(nc);
}
577 
/*
 * Per-virtqueue enable handler (counterpart of virtio_net_queue_reset):
 * restart the vhost virtqueue for tap backends once vhost is running.
 * Control virtqueue is ignored; failure is reported but not fatal.
 */
static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;
    int r;

    /* validate queue_index and skip for cvq */
    if (queue_index >= n->max_queue_pairs * 2) {
        return;
    }

    nc = qemu_get_subqueue(n->nic, vq2q(queue_index));

    if (!nc->peer || !vdev->vhost_started) {
        return;
    }

    if (get_vhost_net(nc->peer) &&
        nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
        r = vhost_net_virtqueue_restart(vdev, nc, queue_index);
        if (r < 0) {
            error_report("unable to restart vhost net virtqueue: %d, "
                            "when resetting the queue", queue_index);
        }
    }
}
604 
605 static void peer_test_vnet_hdr(VirtIONet *n)
606 {
607     NetClientState *nc = qemu_get_queue(n->nic);
608     if (!nc->peer) {
609         return;
610     }
611 
612     n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
613 }
614 
615 static int peer_has_vnet_hdr(VirtIONet *n)
616 {
617     return n->has_vnet_hdr;
618 }
619 
620 static int peer_has_ufo(VirtIONet *n)
621 {
622     if (!peer_has_vnet_hdr(n))
623         return 0;
624 
625     n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
626 
627     return n->has_ufo;
628 }
629 
630 static int peer_has_uso(VirtIONet *n)
631 {
632     if (!peer_has_vnet_hdr(n)) {
633         return 0;
634     }
635 
636     return qemu_has_uso(qemu_get_queue(n->nic)->peer);
637 }
638 
/*
 * Recompute the guest-visible vnet header length after negotiation.
 *
 * VERSION_1 devices always use the mergeable-buffer header layout,
 * widened to the hash-report variant when @hash_report is set; legacy
 * devices use the short header unless @mergeable_rx_bufs.  If a peer
 * supports the chosen length it is programmed into the backend so no
 * header translation is needed (host_hdr_len then equals guest_hdr_len).
 */
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1, int hash_report)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = hash_report ?
            sizeof(struct virtio_net_hdr_v1_hash) :
            sizeof(struct virtio_net_hdr_mrg_rxbuf);
        n->rss_data.populate_hash = !!hash_report;
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
        n->rss_data.populate_hash = false;
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
669 
670 static int virtio_net_max_tx_queue_size(VirtIONet *n)
671 {
672     NetClientState *peer = n->nic_conf.peers.ncs[0];
673 
674     /*
675      * Backends other than vhost-user or vhost-vdpa don't support max queue
676      * size.
677      */
678     if (!peer) {
679         return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
680     }
681 
682     switch(peer->info->type) {
683     case NET_CLIENT_DRIVER_VHOST_USER:
684     case NET_CLIENT_DRIVER_VHOST_VDPA:
685         return VIRTQUEUE_MAX_SIZE;
686     default:
687         return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
688     };
689 }
690 
691 static int peer_attach(VirtIONet *n, int index)
692 {
693     NetClientState *nc = qemu_get_subqueue(n->nic, index);
694 
695     if (!nc->peer) {
696         return 0;
697     }
698 
699     if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
700         vhost_set_vring_enable(nc->peer, 1);
701     }
702 
703     if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
704         return 0;
705     }
706 
707     if (n->max_queue_pairs == 1) {
708         return 0;
709     }
710 
711     return tap_enable(nc->peer);
712 }
713 
714 static int peer_detach(VirtIONet *n, int index)
715 {
716     NetClientState *nc = qemu_get_subqueue(n->nic, index);
717 
718     if (!nc->peer) {
719         return 0;
720     }
721 
722     if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
723         vhost_set_vring_enable(nc->peer, 0);
724     }
725 
726     if (nc->peer->info->type !=  NET_CLIENT_DRIVER_TAP) {
727         return 0;
728     }
729 
730     return tap_disable(nc->peer);
731 }
732 
733 static void virtio_net_set_queue_pairs(VirtIONet *n)
734 {
735     int i;
736     int r;
737 
738     if (n->nic->peer_deleted) {
739         return;
740     }
741 
742     for (i = 0; i < n->max_queue_pairs; i++) {
743         if (i < n->curr_queue_pairs) {
744             r = peer_attach(n, i);
745             assert(!r);
746         } else {
747             r = peer_detach(n, i);
748             assert(!r);
749         }
750     }
751 }
752 
753 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
754 
755 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
756 {
757     uint64_t features = 0;
758 
759     /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
760      * but also these: */
761     virtio_add_feature(&features, VIRTIO_NET_F_MAC);
762     virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
763     virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
764     virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
765     virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
766 
767     return features;
768 }
769 
/*
 * Push the currently negotiated guest offload flags to the backend.
 * Argument order (csum, tso4, tso6, ecn, ufo, uso4, uso6) is fixed by
 * qemu_set_offload().
 */
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO6)));
}
781 
782 static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
783 {
784     static const uint64_t guest_offloads_mask =
785         (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
786         (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
787         (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
788         (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
789         (1ULL << VIRTIO_NET_F_GUEST_UFO)  |
790         (1ULL << VIRTIO_NET_F_GUEST_USO4) |
791         (1ULL << VIRTIO_NET_F_GUEST_USO6);
792 
793     return guest_offloads_mask & features;
794 }
795 
/* Guest offload bits present in the features the guest actually accepted. */
uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
801 
/* Cursor for the bus walk that locates the failover primary device. */
typedef struct {
    VirtIONet *n;      /* the virtio-net whose netclient name must match */
    DeviceState *dev;  /* set to the matching primary device, if found */
} FailoverDevice;
806 
/**
 * Set the failover primary device
 *
 * qbus_walk_children() callback: record the first PCI device whose
 * failover_pair_id matches our netclient name.
 *
 * @dev: device currently being visited by the walk
 * @opaque: FailoverDevice to fill in
 *
 * Returns 1 when a match was recorded, 0 to keep walking.
 */
static int failover_set_primary(DeviceState *dev, void *opaque)
{
    FailoverDevice *fdev = opaque;
    PCIDevice *pci_dev = (PCIDevice *)
        object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE);

    /* Only PCI devices can be failover primaries */
    if (!pci_dev) {
        return 0;
    }

    if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) {
        fdev->dev = dev;
        return 1;
    }

    return 0;
}
831 
/**
 * Find the primary device for this failover virtio-net
 *
 * @n: VirtIONet device
 *
 * Returns the matching device, or NULL if none exists (fdev.dev is
 * zero-initialized by the designated initializer below).
 */
static DeviceState *failover_find_primary_device(VirtIONet *n)
{
    FailoverDevice fdev = {
        .n = n,
    };

    qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL,
                       NULL, NULL, &fdev);
    return fdev.dev;
}
848 
/*
 * Hot-add the failover primary device if it is not already present.
 * Uses the device options stashed in n->primary_opts; on hot-add failure
 * those options are released (so they cannot be retried with stale state)
 * and the error is propagated to @errp.  Missing options is itself an
 * error, since failover cannot work without a primary.
 */
static void failover_add_primary(VirtIONet *n, Error **errp)
{
    Error *err = NULL;
    DeviceState *dev = failover_find_primary_device(n);

    /* Already plugged - nothing to do */
    if (dev) {
        return;
    }

    if (!n->primary_opts) {
        error_setg(errp, "Primary device not found");
        error_append_hint(errp, "Virtio-net failover will not work. Make "
                          "sure primary device has parameter"
                          " failover_pair_id=%s\n", n->netclient_name);
        return;
    }

    dev = qdev_device_add_from_qdict(n->primary_opts,
                                     n->primary_opts_from_json,
                                     &err);
    if (err) {
        qobject_unref(n->primary_opts);
        n->primary_opts = NULL;
    } else {
        /* drop the creation reference; the bus keeps the device alive */
        object_unref(OBJECT(dev));
    }
    error_propagate(errp, err);
}
877 
/*
 * Feature negotiation completion hook.  Propagates the guest's accepted
 * feature set to every dependent piece of state: multiqueue mode, vnet
 * header layout, RSC/RSS flags, RX offloads, vhost backends, the VLAN
 * filter table, and - for STANDBY - the failover primary device.
 */
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    Error *err = NULL;
    int i;

    /* MTU feature is stripped unless the backend negotiated it too */
    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1),
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_HASH_REPORT));

    /* RSC only applies per IP version when the matching TSO bit is on */
    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
    n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0;  i < n->max_queue_pairs; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);

        /*
         * keep acked_features in NetVhostUserState up-to-date so it
         * can't miss any features configured by guest virtio driver.
         */
        vhost_net_save_acked_features(nc->peer);
    }

    /* Without CTRL_VLAN the device must accept every VLAN: set all bits */
    if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
        qapi_event_send_failover_negotiated(n->netclient_name);
        qatomic_set(&n->failover_primary_hidden, false);
        failover_add_primary(n, &err);
        if (err) {
            if (!qtest_enabled()) {
                warn_report_err(err);
            } else {
                error_free(err);
            }
        }
    }
}
945 
946 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
947                                      struct iovec *iov, unsigned int iov_cnt)
948 {
949     uint8_t on;
950     size_t s;
951     NetClientState *nc = qemu_get_queue(n->nic);
952 
953     s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
954     if (s != sizeof(on)) {
955         return VIRTIO_NET_ERR;
956     }
957 
958     if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
959         n->promisc = on;
960     } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
961         n->allmulti = on;
962     } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
963         n->alluni = on;
964     } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
965         n->nomulti = on;
966     } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
967         n->nouni = on;
968     } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
969         n->nobcast = on;
970     } else {
971         return VIRTIO_NET_ERR;
972     }
973 
974     rxfilter_notify(nc);
975 
976     return VIRTIO_NET_OK;
977 }
978 
979 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
980                                      struct iovec *iov, unsigned int iov_cnt)
981 {
982     VirtIODevice *vdev = VIRTIO_DEVICE(n);
983     uint64_t offloads;
984     size_t s;
985 
986     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
987         return VIRTIO_NET_ERR;
988     }
989 
990     s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
991     if (s != sizeof(offloads)) {
992         return VIRTIO_NET_ERR;
993     }
994 
995     if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
996         uint64_t supported_offloads;
997 
998         offloads = virtio_ldq_p(vdev, &offloads);
999 
1000         if (!n->has_vnet_hdr) {
1001             return VIRTIO_NET_ERR;
1002         }
1003 
1004         n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1005             virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
1006         n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1007             virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
1008         virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
1009 
1010         supported_offloads = virtio_net_supported_guest_offloads(n);
1011         if (offloads & ~supported_offloads) {
1012             return VIRTIO_NET_ERR;
1013         }
1014 
1015         n->curr_guest_offloads = offloads;
1016         virtio_net_apply_guest_offloads(n);
1017 
1018         return VIRTIO_NET_OK;
1019     } else {
1020         return VIRTIO_NET_ERR;
1021     }
1022 }
1023 
/*
 * Handle VIRTIO_NET_CTRL_MAC commands: either set the primary MAC
 * address (MAC_ADDR_SET) or replace the unicast + multicast filter
 * tables (MAC_TABLE_SET).
 *
 * The MAC_TABLE_SET payload is two back-to-back virtio_net_ctrl_mac
 * structures: first the unicast list, then the multicast list, each a
 * 32-bit entry count followed by that many 6-byte addresses.
 *
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR on malformed input.
 */
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        /* Payload must be exactly one MAC address */
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    /*
     * Build the new table in a scratch buffer first so the live
     * mac_table is only replaced once the whole payload has parsed.
     */
    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    /* Unicast list: 32-bit entry count (guest endian), then addresses */
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        /* Too many unicast entries: fall back to allowing all unicast */
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    /* Multicast entries start where the unicast ones end */
    first_multi = in_use;

    /* Multicast list: same layout, must consume the rest of the buffer */
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        /* Too many multicast entries: fall back to allowing all multicast */
        multi_overflow = 1;
    }

    /* Commit the fully-parsed table to the device state */
    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
1119 
1120 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
1121                                         struct iovec *iov, unsigned int iov_cnt)
1122 {
1123     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1124     uint16_t vid;
1125     size_t s;
1126     NetClientState *nc = qemu_get_queue(n->nic);
1127 
1128     s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
1129     vid = virtio_lduw_p(vdev, &vid);
1130     if (s != sizeof(vid)) {
1131         return VIRTIO_NET_ERR;
1132     }
1133 
1134     if (vid >= MAX_VLAN)
1135         return VIRTIO_NET_ERR;
1136 
1137     if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
1138         n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
1139     else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
1140         n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
1141     else
1142         return VIRTIO_NET_ERR;
1143 
1144     rxfilter_notify(nc);
1145 
1146     return VIRTIO_NET_OK;
1147 }
1148 
1149 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
1150                                       struct iovec *iov, unsigned int iov_cnt)
1151 {
1152     trace_virtio_net_handle_announce(n->announce_timer.round);
1153     if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
1154         n->status & VIRTIO_NET_S_ANNOUNCE) {
1155         n->status &= ~VIRTIO_NET_S_ANNOUNCE;
1156         if (n->announce_timer.round) {
1157             qemu_announce_timer_step(&n->announce_timer);
1158         }
1159         return VIRTIO_NET_OK;
1160     } else {
1161         return VIRTIO_NET_ERR;
1162     }
1163 }
1164 
1165 static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
1166 {
1167     NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
1168     if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
1169         return false;
1170     }
1171 
1172     trace_virtio_net_rss_attach_ebpf(nic, prog_fd);
1173     return nc->info->set_steering_ebpf(nc, prog_fd);
1174 }
1175 
/*
 * Copy the device-model RSS settings into the configuration layout
 * consumed by the eBPF steering program. Only these five fields are
 * transferred; anything else in *config is left as the caller set it.
 */
static void rss_data_to_rss_config(struct VirtioNetRssData *data,
                                   struct EBPFRSSConfig *config)
{
    config->redirect = data->redirect;
    config->populate_hash = data->populate_hash;
    config->hash_types = data->hash_types;
    config->indirections_len = data->indirections_len;
    config->default_queue = data->default_queue;
}
1185 
1186 static bool virtio_net_attach_ebpf_rss(VirtIONet *n)
1187 {
1188     struct EBPFRSSConfig config = {};
1189 
1190     if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
1191         return false;
1192     }
1193 
1194     rss_data_to_rss_config(&n->rss_data, &config);
1195 
1196     if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
1197                           n->rss_data.indirections_table, n->rss_data.key,
1198                           NULL)) {
1199         return false;
1200     }
1201 
1202     if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
1203         return false;
1204     }
1205 
1206     return true;
1207 }
1208 
/* Detach any eBPF steering program from the backend (fd -1 == detach). */
static void virtio_net_detach_ebpf_rss(VirtIONet *n)
{
    virtio_net_attach_ebpf_to_backend(n->nic, -1);
}
1213 
1214 static void virtio_net_commit_rss_config(VirtIONet *n)
1215 {
1216     if (n->rss_data.enabled) {
1217         n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
1218         if (n->rss_data.populate_hash) {
1219             virtio_net_detach_ebpf_rss(n);
1220         } else if (!virtio_net_attach_ebpf_rss(n)) {
1221             if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
1222                 warn_report("Can't load eBPF RSS for vhost");
1223             } else {
1224                 warn_report("Can't load eBPF RSS - fallback to software RSS");
1225                 n->rss_data.enabled_software_rss = true;
1226             }
1227         }
1228 
1229         trace_virtio_net_rss_enable(n,
1230                                     n->rss_data.hash_types,
1231                                     n->rss_data.indirections_len,
1232                                     sizeof(n->rss_data.key));
1233     } else {
1234         virtio_net_detach_ebpf_rss(n);
1235         trace_virtio_net_rss_disable(n);
1236     }
1237 }
1238 
1239 static void virtio_net_disable_rss(VirtIONet *n)
1240 {
1241     if (!n->rss_data.enabled) {
1242         return;
1243     }
1244 
1245     n->rss_data.enabled = false;
1246     virtio_net_commit_rss_config(n);
1247 }
1248 
1249 static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp)
1250 {
1251     int fds[EBPF_RSS_MAX_FDS] = { [0 ... EBPF_RSS_MAX_FDS - 1] = -1};
1252     int ret = true;
1253     int i = 0;
1254 
1255     if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) {
1256         error_setg(errp, "Expected %d file descriptors but got %d",
1257                    EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
1258         return false;
1259     }
1260 
1261     for (i = 0; i < n->nr_ebpf_rss_fds; i++) {
1262         fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], errp);
1263         if (fds[i] < 0) {
1264             ret = false;
1265             goto exit;
1266         }
1267     }
1268 
1269     ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3], errp);
1270 
1271 exit:
1272     if (!ret) {
1273         for (i = 0; i < n->nr_ebpf_rss_fds && fds[i] != -1; i++) {
1274             close(fds[i]);
1275         }
1276     }
1277 
1278     return ret;
1279 }
1280 
/*
 * Load the eBPF RSS program if the backend can make use of one.
 * Returns false only on a fatal error (user-provided FDs that could
 * not be loaded); otherwise true, possibly without eBPF loaded.
 */
static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp)
{
    /*
     * Attaching fd -1 doubles as a capability probe: if the backend
     * cannot take a steering program at all, there is nothing to load
     * and that is not an error.
     */
    if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
        return true;
    }

    trace_virtio_net_rss_load(n, n->nr_ebpf_rss_fds, n->ebpf_rss_fds);

    /*
     * If user explicitly gave QEMU RSS FDs to use, then
     * failing to use them must be considered a fatal
     * error. If no RSS FDs were provided, QEMU is trying
     * eBPF on a "best effort" basis only, so report a
     * warning and allow fallback to software RSS.
     */
    if (n->ebpf_rss_fds) {
        return virtio_net_load_ebpf_fds(n, errp);
    }

    ebpf_rss_load(&n->ebpf_rss, &error_warn);
    return true;
}
1303 
/* Detach the steering program from the backend and free its eBPF state. */
static void virtio_net_unload_ebpf(VirtIONet *n)
{
    virtio_net_attach_ebpf_to_backend(n->nic, -1);
    ebpf_rss_unload(&n->ebpf_rss);
}
1309 
1310 static uint16_t virtio_net_handle_rss(VirtIONet *n,
1311                                       struct iovec *iov,
1312                                       unsigned int iov_cnt,
1313                                       bool do_rss)
1314 {
1315     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1316     struct virtio_net_rss_config cfg;
1317     size_t s, offset = 0, size_get;
1318     uint16_t queue_pairs, i;
1319     struct {
1320         uint16_t us;
1321         uint8_t b;
1322     } QEMU_PACKED temp;
1323     const char *err_msg = "";
1324     uint32_t err_value = 0;
1325 
1326     if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
1327         err_msg = "RSS is not negotiated";
1328         goto error;
1329     }
1330     if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
1331         err_msg = "Hash report is not negotiated";
1332         goto error;
1333     }
1334     size_get = offsetof(struct virtio_net_rss_config, indirection_table);
1335     s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
1336     if (s != size_get) {
1337         err_msg = "Short command buffer";
1338         err_value = (uint32_t)s;
1339         goto error;
1340     }
1341     n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
1342     n->rss_data.indirections_len =
1343         virtio_lduw_p(vdev, &cfg.indirection_table_mask);
1344     if (!do_rss) {
1345         n->rss_data.indirections_len = 0;
1346     }
1347     if (n->rss_data.indirections_len >= VIRTIO_NET_RSS_MAX_TABLE_LEN) {
1348         err_msg = "Too large indirection table";
1349         err_value = n->rss_data.indirections_len;
1350         goto error;
1351     }
1352     n->rss_data.indirections_len++;
1353     if (!is_power_of_2(n->rss_data.indirections_len)) {
1354         err_msg = "Invalid size of indirection table";
1355         err_value = n->rss_data.indirections_len;
1356         goto error;
1357     }
1358     n->rss_data.default_queue = do_rss ?
1359         virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
1360     if (n->rss_data.default_queue >= n->max_queue_pairs) {
1361         err_msg = "Invalid default queue";
1362         err_value = n->rss_data.default_queue;
1363         goto error;
1364     }
1365     offset += size_get;
1366     size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
1367     g_free(n->rss_data.indirections_table);
1368     n->rss_data.indirections_table = g_malloc(size_get);
1369     if (!n->rss_data.indirections_table) {
1370         err_msg = "Can't allocate indirections table";
1371         err_value = n->rss_data.indirections_len;
1372         goto error;
1373     }
1374     s = iov_to_buf(iov, iov_cnt, offset,
1375                    n->rss_data.indirections_table, size_get);
1376     if (s != size_get) {
1377         err_msg = "Short indirection table buffer";
1378         err_value = (uint32_t)s;
1379         goto error;
1380     }
1381     for (i = 0; i < n->rss_data.indirections_len; ++i) {
1382         uint16_t val = n->rss_data.indirections_table[i];
1383         n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
1384     }
1385     offset += size_get;
1386     size_get = sizeof(temp);
1387     s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
1388     if (s != size_get) {
1389         err_msg = "Can't get queue_pairs";
1390         err_value = (uint32_t)s;
1391         goto error;
1392     }
1393     queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs;
1394     if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) {
1395         err_msg = "Invalid number of queue_pairs";
1396         err_value = queue_pairs;
1397         goto error;
1398     }
1399     if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
1400         err_msg = "Invalid key size";
1401         err_value = temp.b;
1402         goto error;
1403     }
1404     if (!temp.b && n->rss_data.hash_types) {
1405         err_msg = "No key provided";
1406         err_value = 0;
1407         goto error;
1408     }
1409     if (!temp.b && !n->rss_data.hash_types) {
1410         virtio_net_disable_rss(n);
1411         return queue_pairs;
1412     }
1413     offset += size_get;
1414     size_get = temp.b;
1415     s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
1416     if (s != size_get) {
1417         err_msg = "Can get key buffer";
1418         err_value = (uint32_t)s;
1419         goto error;
1420     }
1421     n->rss_data.enabled = true;
1422     virtio_net_commit_rss_config(n);
1423     return queue_pairs;
1424 error:
1425     trace_virtio_net_rss_error(n, err_msg, err_value);
1426     virtio_net_disable_rss(n);
1427     return 0;
1428 }
1429 
/*
 * Handle VIRTIO_NET_CTRL_MQ commands: hash-report config, RSS config,
 * or a plain VQ_PAIRS_SET changing the number of active queue pairs.
 *
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 */
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t queue_pairs;
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Any MQ command replaces the previous RSS/hash configuration */
    virtio_net_disable_rss(n);
    if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
        /* Hash report only: queue-pair count is left unchanged */
        queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false);
        return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
    }
    if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
        queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true);
    } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        struct virtio_net_ctrl_mq mq;
        size_t s;
        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
        if (s != sizeof(mq)) {
            return VIRTIO_NET_ERR;
        }
        queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    } else {
        return VIRTIO_NET_ERR;
    }

    /* Validate the requested count against spec limits and device caps */
    if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queue_pairs > n->max_queue_pairs ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queue_pairs = queue_pairs;
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        /*
         * Avoid updating the backend for a vdpa device: We're only interested
         * in updating the device model queues.
         */
        return VIRTIO_NET_OK;
    }
    /* stop the backend before changing the number of queue_pairs to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queue_pairs(n);

    return VIRTIO_NET_OK;
}
1482 
1483 size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
1484                                   const struct iovec *in_sg, unsigned in_num,
1485                                   const struct iovec *out_sg,
1486                                   unsigned out_num)
1487 {
1488     VirtIONet *n = VIRTIO_NET(vdev);
1489     struct virtio_net_ctrl_hdr ctrl;
1490     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1491     size_t s;
1492     struct iovec *iov, *iov2;
1493 
1494     if (iov_size(in_sg, in_num) < sizeof(status) ||
1495         iov_size(out_sg, out_num) < sizeof(ctrl)) {
1496         virtio_error(vdev, "virtio-net ctrl missing headers");
1497         return 0;
1498     }
1499 
1500     iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
1501     s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
1502     iov_discard_front(&iov, &out_num, sizeof(ctrl));
1503     if (s != sizeof(ctrl)) {
1504         status = VIRTIO_NET_ERR;
1505     } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
1506         status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
1507     } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
1508         status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
1509     } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
1510         status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
1511     } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
1512         status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
1513     } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
1514         status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
1515     } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1516         status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
1517     }
1518 
1519     s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
1520     assert(s == sizeof(status));
1521 
1522     g_free(iov2);
1523     return sizeof(status);
1524 }
1525 
1526 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1527 {
1528     VirtQueueElement *elem;
1529 
1530     for (;;) {
1531         size_t written;
1532         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1533         if (!elem) {
1534             break;
1535         }
1536 
1537         written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
1538                                              elem->out_sg, elem->out_num);
1539         if (written > 0) {
1540             virtqueue_push(vq, elem, written);
1541             virtio_notify(vdev, vq);
1542             g_free(elem);
1543         } else {
1544             virtqueue_detach_element(vq, elem, 0);
1545             g_free(elem);
1546             break;
1547         }
1548     }
1549 }
1550 
1551 /* RX */
1552 
1553 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1554 {
1555     VirtIONet *n = VIRTIO_NET(vdev);
1556     int queue_index = vq2q(virtio_get_queue_index(vq));
1557 
1558     qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1559 }
1560 
1561 static bool virtio_net_can_receive(NetClientState *nc)
1562 {
1563     VirtIONet *n = qemu_get_nic_opaque(nc);
1564     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1565     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1566 
1567     if (!vdev->vm_running) {
1568         return false;
1569     }
1570 
1571     if (nc->queue_index >= n->curr_queue_pairs) {
1572         return false;
1573     }
1574 
1575     if (!virtio_queue_ready(q->rx_vq) ||
1576         !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1577         return false;
1578     }
1579 
1580     return true;
1581 }
1582 
/*
 * Check whether the RX virtqueue can hold a packet of bufsize bytes.
 * Returns 1 (and disables further queue notifications) when enough
 * buffer space is available; returns 0 (with notifications enabled)
 * when the caller must wait for the guest to add buffers.
 */
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    int opaque;
    unsigned int in_bytes;
    VirtIONet *n = q->n;

    /*
     * With mergeable RX buffers a packet may span several elements,
     * so keep measuring even when the queue is not empty.
     */
    while (virtio_queue_empty(q->rx_vq) || n->mergeable_rx_bufs) {
        opaque = virtqueue_get_avail_bytes(q->rx_vq, &in_bytes, NULL,
                                           bufsize, 0);
        /* Buffer is enough, disable notification */
        if (bufsize <= in_bytes) {
            break;
        }

        if (virtio_queue_enable_notification_and_check(q->rx_vq, opaque)) {
            /* Guest has added some buffers, try again */
            continue;
        } else {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);

    return 1;
}
1609 
/*
 * Byte-swap the multi-byte fields of a virtio_net_hdr between host
 * and guest endianness (no-op when they already match).
 */
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
1617 
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    /* Smallest frame that can carry a UDP datagram over IPv4 */
    size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
                       sizeof(struct udp_header);

    /*
     * Match bootps replies by fixed byte offsets into the frame:
     * ethertype at 12/13, IP protocol at 23, UDP source port at 34/35.
     * Assumes no VLAN tag and no IPv4 options — TODO confirm that is
     * acceptable for the DHCP traffic this targets.
     */
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size >= csum_size && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size, CSUM_UDP);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
1647 
/*
 * Copy (or synthesize) the virtio_net_hdr at the front of the guest's
 * receive buffers. With a vnet header present in buf, fix up broken
 * DHCP checksums and swap endianness first; otherwise emit a fresh
 * all-zero header.
 */
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        /* May rewrite the header flags and packet checksum in place */
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        /* Backend supplied no header: present a harmless empty one */
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
1669 
1670 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1671 {
1672     static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1673     static const uint8_t vlan[] = {0x81, 0x00};
1674     uint8_t *ptr = (uint8_t *)buf;
1675     int i;
1676 
1677     if (n->promisc)
1678         return 1;
1679 
1680     ptr += n->host_hdr_len;
1681 
1682     if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1683         int vid = lduw_be_p(ptr + 14) & 0xfff;
1684         if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1685             return 0;
1686     }
1687 
1688     if (ptr[0] & 1) { // multicast
1689         if (!memcmp(ptr, bcast, sizeof(bcast))) {
1690             return !n->nobcast;
1691         } else if (n->nomulti) {
1692             return 0;
1693         } else if (n->allmulti || n->mac_table.multi_overflow) {
1694             return 1;
1695         }
1696 
1697         for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1698             if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1699                 return 1;
1700             }
1701         }
1702     } else { // unicast
1703         if (n->nouni) {
1704             return 0;
1705         } else if (n->alluni || n->mac_table.uni_overflow) {
1706             return 1;
1707         } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1708             return 1;
1709         }
1710 
1711         for (i = 0; i < n->mac_table.first_multi; i++) {
1712             if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1713                 return 1;
1714             }
1715         }
1716     }
1717 
1718     return 0;
1719 }
1720 
1721 static uint8_t virtio_net_get_hash_type(bool hasip4,
1722                                         bool hasip6,
1723                                         EthL4HdrProto l4hdr_proto,
1724                                         uint32_t types)
1725 {
1726     if (hasip4) {
1727         switch (l4hdr_proto) {
1728         case ETH_L4_HDR_PROTO_TCP:
1729             if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
1730                 return NetPktRssIpV4Tcp;
1731             }
1732             break;
1733 
1734         case ETH_L4_HDR_PROTO_UDP:
1735             if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
1736                 return NetPktRssIpV4Udp;
1737             }
1738             break;
1739 
1740         default:
1741             break;
1742         }
1743 
1744         if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
1745             return NetPktRssIpV4;
1746         }
1747     } else if (hasip6) {
1748         switch (l4hdr_proto) {
1749         case ETH_L4_HDR_PROTO_TCP:
1750             if (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) {
1751                 return NetPktRssIpV6TcpEx;
1752             }
1753             if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
1754                 return NetPktRssIpV6Tcp;
1755             }
1756             break;
1757 
1758         case ETH_L4_HDR_PROTO_UDP:
1759             if (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) {
1760                 return NetPktRssIpV6UdpEx;
1761             }
1762             if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
1763                 return NetPktRssIpV6Udp;
1764             }
1765             break;
1766 
1767         default:
1768             break;
1769         }
1770 
1771         if (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) {
1772             return NetPktRssIpV6Ex;
1773         }
1774         if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
1775             return NetPktRssIpV6;
1776         }
1777     }
1778     return 0xff;
1779 }
1780 
/*
 * Software RSS: compute the packet's RSS hash, optionally fill the
 * hash fields of *hdr, and pick the destination queue from the
 * indirection table.
 *
 * Returns the new queue index, or -1 when the packet should stay on
 * the queue it arrived on.
 */
static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
                                  size_t size,
                                  struct virtio_net_hdr_v1_hash *hdr)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    unsigned int index = nc->queue_index, new_index = index;
    struct NetRxPkt *pkt = n->rx_pkt;
    uint8_t net_hash_type;
    uint32_t hash;
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    /* Maps NetPktRss* hash kinds to VIRTIO_NET_HASH_REPORT_* values */
    static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
        VIRTIO_NET_HASH_REPORT_IPv4,
        VIRTIO_NET_HASH_REPORT_TCPv4,
        VIRTIO_NET_HASH_REPORT_TCPv6,
        VIRTIO_NET_HASH_REPORT_IPv6,
        VIRTIO_NET_HASH_REPORT_IPv6_EX,
        VIRTIO_NET_HASH_REPORT_TCPv6_EX,
        VIRTIO_NET_HASH_REPORT_UDPv4,
        VIRTIO_NET_HASH_REPORT_UDPv6,
        VIRTIO_NET_HASH_REPORT_UDPv6_EX
    };
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = size
    };

    net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len);
    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto,
                                             n->rss_data.hash_types);
    if (net_hash_type > NetPktRssIpV6UdpEx) {
        /* No applicable hash type: report none, use the default queue */
        if (n->rss_data.populate_hash) {
            hdr->hash_value = VIRTIO_NET_HASH_REPORT_NONE;
            hdr->hash_report = 0;
        }
        return n->rss_data.redirect ? n->rss_data.default_queue : -1;
    }

    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);

    if (n->rss_data.populate_hash) {
        hdr->hash_value = hash;
        hdr->hash_report = reports[net_hash_type];
    }

    if (n->rss_data.redirect) {
        /* indirections_len is a power of two, so this masks the hash */
        new_index = hash & (n->rss_data.indirections_len - 1);
        new_index = n->rss_data.indirections_table[new_index];
    }

    return (index == new_index) ? -1 : new_index;
}
1834 
/*
 * Deliver one packet from the net backend into the guest RX virtqueue.
 *
 * Caller holds the RCU read lock (see virtio_net_do_receive).  Returns
 * the number of bytes consumed on success, 0 when the queue has too few
 * buffers (backend should retry later), or -1 on error.
 */
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* Elements popped so far; only filled/flushed once the packet fits. */
    QEMU_UNINITIALIZED VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
    QEMU_UNINITIALIZED size_t lens[VIRTQUEUE_MAX_SIZE];
    /* iovec over the guest copy of num_buffers, patched after the loop. */
    QEMU_UNINITIALIZED struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_v1_hash extra_hdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset, j;
    ssize_t err;

    memset(&extra_hdr, 0, sizeof(extra_hdr));

    /* Software RSS may steer the packet to a different subqueue. */
    if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
        int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
        if (index >= 0) {
            nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
        }
    }

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    q = virtio_net_get_subqueue(nc);

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    /* Filtered out (MAC/VLAN/promisc): report it as consumed. */
    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    /* Pop elements until the whole packet (plus guest header) is copied. */
    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        if (i == VIRTQUEUE_MAX_SIZE) {
            virtio_error(vdev, "virtio-net unexpected long buffer chain");
            err = size;
            goto err;
        }

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                /* Queue drained mid-packet even though has_buffers passed. */
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            err = -1;
            goto err;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            err = -1;
            goto err;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                /*
                 * Record where num_buffers lives inside the first element
                 * so the final element count can be written there later.
                 */
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(extra_hdr), hdr.num_buffers),
                                    sizeof(extra_hdr.hdr.num_buffers));
            } else {
                extra_hdr.hdr.num_buffers = cpu_to_le16(1);
            }

            /* Build and store the guest-visible virtio-net header. */
            receive_header(n, sg, elem->in_num, buf, size);
            if (n->rss_data.populate_hash) {
                /* Overwrite hash_value + hash_report set by RSS processing. */
                offset = offsetof(typeof(extra_hdr), hash_value);
                iov_from_buf(sg, elem->in_num, offset,
                             (char *)&extra_hdr + offset,
                             sizeof(extra_hdr.hash_value) +
                             sizeof(extra_hdr.hash_report));
            }
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            err = size;
            goto err;
        }

        elems[i] = elem;
        lens[i] = total;
        i++;
    }

    if (mhdr_cnt) {
        /* Patch the real element count into the first header's num_buffers. */
        virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &extra_hdr.hdr.num_buffers,
                     sizeof extra_hdr.hdr.num_buffers);
    }

    for (j = 0; j < i; j++) {
        /* signal other side */
        virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
        g_free(elems[j]);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;

err:
    /* Return every popped element to the queue without exposing it. */
    for (j = 0; j < i; j++) {
        virtqueue_detach_element(q->rx_vq, elems[j], lens[j]);
        g_free(elems[j]);
    }

    return err;
}
1985 
1986 static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
1987                                   size_t size)
1988 {
1989     RCU_READ_LOCK_GUARD();
1990 
1991     return virtio_net_receive_rcu(nc, buf, size);
1992 }
1993 
/*
 * Accessors to read and write the IP packet data length field. This
 * is a potentially unaligned network-byte-order 16 bit unsigned integer
 * pointed to by unit->ip_plen.
 */
1999 static uint16_t read_unit_ip_len(VirtioNetRscUnit *unit)
2000 {
2001     return lduw_be_p(unit->ip_plen);
2002 }
2003 
2004 static void write_unit_ip_len(VirtioNetRscUnit *unit, uint16_t l)
2005 {
2006     stw_be_p(unit->ip_plen, l);
2007 }
2008 
/*
 * Locate the IPv4 and TCP headers inside @buf and record their positions
 * and lengths in @unit.  The caller is responsible for having verified
 * that @buf is large enough (see virtio_net_rsc_receive4).
 */
static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    uint16_t ip_hdrlen;
    struct ip_header *ip;

    ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
                              + sizeof(struct eth_header));
    unit->ip = (void *)ip;
    /* IHL is in 32-bit words: multiply by 4 to get bytes. */
    ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
    unit->ip_plen = &ip->ip_len;
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
    /* Data offset sits in the top nibble, in words: (>> 12) * 4 == >> 10. */
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
    /* IPv4 total length includes the IP header, so subtract both headers. */
    unit->payload = read_unit_ip_len(unit) - ip_hdrlen - unit->tcp_hdrlen;
}
2025 
/*
 * Locate the IPv6 and TCP headers inside @buf and record their positions
 * and lengths in @unit.  Assumes no IPv6 extension headers are present
 * (the sanity check rejects anything whose next-header is not TCP).
 */
static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    struct ip6_header *ip6;

    ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
                                 + sizeof(struct eth_header));
    unit->ip = ip6;
    unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
                                        + sizeof(struct ip6_header));
    /* Data offset sits in the top nibble, in words: (>> 12) * 4 == >> 10. */
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;

    /* There is a difference between payload length in ipv4 and v6,
       ip header is excluded in ipv6 */
    unit->payload = read_unit_ip_len(unit) - unit->tcp_hdrlen;
}
2044 
2045 static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
2046                                        VirtioNetRscSeg *seg)
2047 {
2048     int ret;
2049     struct virtio_net_hdr_v1 *h;
2050 
2051     h = (struct virtio_net_hdr_v1 *)seg->buf;
2052     h->flags = 0;
2053     h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
2054 
2055     if (seg->is_coalesced) {
2056         h->rsc.segments = seg->packets;
2057         h->rsc.dup_acks = seg->dup_ack;
2058         h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
2059         if (chain->proto == ETH_P_IP) {
2060             h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2061         } else {
2062             h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2063         }
2064     }
2065 
2066     ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
2067     QTAILQ_REMOVE(&chain->buffers, seg, next);
2068     g_free(seg->buf);
2069     g_free(seg);
2070 
2071     return ret;
2072 }
2073 
2074 static void virtio_net_rsc_purge(void *opq)
2075 {
2076     VirtioNetRscSeg *seg, *rn;
2077     VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
2078 
2079     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
2080         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2081             chain->stat.purge_failed++;
2082             continue;
2083         }
2084     }
2085 
2086     chain->stat.timer++;
2087     if (!QTAILQ_EMPTY(&chain->buffers)) {
2088         timer_mod(chain->drain_timer,
2089               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout);
2090     }
2091 }
2092 
2093 static void virtio_net_rsc_cleanup(VirtIONet *n)
2094 {
2095     VirtioNetRscChain *chain, *rn_chain;
2096     VirtioNetRscSeg *seg, *rn_seg;
2097 
2098     QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
2099         QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
2100             QTAILQ_REMOVE(&chain->buffers, seg, next);
2101             g_free(seg->buf);
2102             g_free(seg);
2103         }
2104 
2105         timer_free(chain->drain_timer);
2106         QTAILQ_REMOVE(&n->rsc_chains, chain, next);
2107         g_free(chain);
2108     }
2109 }
2110 
2111 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
2112                                      NetClientState *nc,
2113                                      const uint8_t *buf, size_t size)
2114 {
2115     uint16_t hdr_len;
2116     VirtioNetRscSeg *seg;
2117 
2118     hdr_len = chain->n->guest_hdr_len;
2119     seg = g_new(VirtioNetRscSeg, 1);
2120     seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
2121         + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
2122     memcpy(seg->buf, buf, size);
2123     seg->size = size;
2124     seg->packets = 1;
2125     seg->dup_ack = 0;
2126     seg->is_coalesced = 0;
2127     seg->nc = nc;
2128 
2129     QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
2130     chain->stat.cache++;
2131 
2132     switch (chain->proto) {
2133     case ETH_P_IP:
2134         virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
2135         break;
2136     case ETH_P_IPV6:
2137         virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
2138         break;
2139     default:
2140         g_assert_not_reached();
2141     }
2142 }
2143 
2144 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
2145                                          VirtioNetRscSeg *seg,
2146                                          const uint8_t *buf,
2147                                          struct tcp_header *n_tcp,
2148                                          struct tcp_header *o_tcp)
2149 {
2150     uint32_t nack, oack;
2151     uint16_t nwin, owin;
2152 
2153     nack = htonl(n_tcp->th_ack);
2154     nwin = htons(n_tcp->th_win);
2155     oack = htonl(o_tcp->th_ack);
2156     owin = htons(o_tcp->th_win);
2157 
2158     if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
2159         chain->stat.ack_out_of_win++;
2160         return RSC_FINAL;
2161     } else if (nack == oack) {
2162         /* duplicated ack or window probe */
2163         if (nwin == owin) {
2164             /* duplicated ack, add dup ack count due to whql test up to 1 */
2165             chain->stat.dup_ack++;
2166             return RSC_FINAL;
2167         } else {
2168             /* Coalesce window update */
2169             o_tcp->th_win = n_tcp->th_win;
2170             chain->stat.win_update++;
2171             return RSC_COALESCE;
2172         }
2173     } else {
2174         /* pure ack, go to 'C', finalize*/
2175         chain->stat.pure_ack++;
2176         return RSC_FINAL;
2177     }
2178 }
2179 
/*
 * Try to append @n_unit's TCP payload to the cached segment @seg.
 *
 * Returns RSC_COALESCE when the data (or a window update) was merged,
 * or RSC_FINAL when the segment must be drained and the new packet sent
 * on its own.  Sequence-number comparisons rely on unsigned wraparound.
 */
static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
                                            VirtioNetRscSeg *seg,
                                            const uint8_t *buf,
                                            VirtioNetRscUnit *n_unit)
{
    void *data;
    uint16_t o_ip_len;
    uint32_t nseq, oseq;
    VirtioNetRscUnit *o_unit;

    o_unit = &seg->unit;
    o_ip_len = read_unit_ip_len(o_unit);
    nseq = htonl(n_unit->tcp->th_seq);
    oseq = htonl(o_unit->tcp->th_seq);

    /* out of order or retransmitted. */
    if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.data_out_of_win++;
        return RSC_FINAL;
    }

    /* Start of the new packet's TCP payload. */
    data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
    if (nseq == oseq) {
        if ((o_unit->payload == 0) && n_unit->payload) {
            /* From no payload to payload, normal case, not a dup ack or etc */
            chain->stat.data_after_pure_ack++;
            goto coalesce;
        } else {
            /* No new data: treat as ack/window-probe handling. */
            return virtio_net_rsc_handle_ack(chain, seg, buf,
                                             n_unit->tcp, o_unit->tcp);
        }
    } else if ((nseq - oseq) != o_unit->payload) {
        /* Not a consistent packet, out of order */
        chain->stat.data_out_of_order++;
        return RSC_FINAL;
    } else {
coalesce:
        if ((o_ip_len + n_unit->payload) > chain->max_payload) {
            chain->stat.over_size++;
            return RSC_FINAL;
        }

        /* Here comes the right data, the payload length in v4/v6 is different,
           so use the field value to update and record the new data len */
        o_unit->payload += n_unit->payload; /* update new data len */

        /* update field in ip header */
        write_unit_ip_len(o_unit, o_ip_len + n_unit->payload);

        /* Bring 'PUSH' big, the whql test guide says 'PUSH' can be coalesced
           for windows guest, while this may change the behavior for linux
           guest (only if it uses RSC feature). */
        o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;

        /* Carry the newest ack and window into the coalesced segment. */
        o_unit->tcp->th_ack = n_unit->tcp->th_ack;
        o_unit->tcp->th_win = n_unit->tcp->th_win;

        /* Append the new payload after the cached segment's data. */
        memmove(seg->buf + seg->size, data, n_unit->payload);
        seg->size += n_unit->payload;
        seg->packets++;
        chain->stat.coalesced++;
        return RSC_COALESCE;
    }
}
2244 
2245 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
2246                                         VirtioNetRscSeg *seg,
2247                                         const uint8_t *buf, size_t size,
2248                                         VirtioNetRscUnit *unit)
2249 {
2250     struct ip_header *ip1, *ip2;
2251 
2252     ip1 = (struct ip_header *)(unit->ip);
2253     ip2 = (struct ip_header *)(seg->unit.ip);
2254     if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
2255         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2256         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2257         chain->stat.no_match++;
2258         return RSC_NO_MATCH;
2259     }
2260 
2261     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2262 }
2263 
2264 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
2265                                         VirtioNetRscSeg *seg,
2266                                         const uint8_t *buf, size_t size,
2267                                         VirtioNetRscUnit *unit)
2268 {
2269     struct ip6_header *ip1, *ip2;
2270 
2271     ip1 = (struct ip6_header *)(unit->ip);
2272     ip2 = (struct ip6_header *)(seg->unit.ip);
2273     if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
2274         || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
2275         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2276         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2277             chain->stat.no_match++;
2278             return RSC_NO_MATCH;
2279     }
2280 
2281     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2282 }
2283 
2284 /* Packets with 'SYN' should bypass, other flag should be sent after drain
2285  * to prevent out of order */
2286 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
2287                                          struct tcp_header *tcp)
2288 {
2289     uint16_t tcp_hdr;
2290     uint16_t tcp_flag;
2291 
2292     tcp_flag = htons(tcp->th_offset_flags);
2293     tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
2294     tcp_flag &= VIRTIO_NET_TCP_FLAG;
2295     if (tcp_flag & TH_SYN) {
2296         chain->stat.tcp_syn++;
2297         return RSC_BYPASS;
2298     }
2299 
2300     if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
2301         chain->stat.tcp_ctrl_drain++;
2302         return RSC_FINAL;
2303     }
2304 
2305     if (tcp_hdr > sizeof(struct tcp_header)) {
2306         chain->stat.tcp_all_opt++;
2307         return RSC_FINAL;
2308     }
2309 
2310     return RSC_CANDIDATE;
2311 }
2312 
/*
 * Try to merge @buf into an existing cached segment of @chain; cache it
 * as a new segment when no flow matches.  Returns the bytes "consumed"
 * (i.e. @size on success, or the receive-path result when a drain forced
 * the packet to be sent directly; 0 signals a failed send).
 */
static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
                                         NetClientState *nc,
                                         const uint8_t *buf, size_t size,
                                         VirtioNetRscUnit *unit)
{
    int ret;
    VirtioNetRscSeg *seg, *nseg;

    if (QTAILQ_EMPTY(&chain->buffers)) {
        /* First packet on this chain: cache it and arm the drain timer. */
        chain->stat.empty_cache++;
        virtio_net_rsc_cache_buf(chain, nc, buf, size);
        timer_mod(chain->drain_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout);
        return size;
    }

    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
        if (chain->proto == ETH_P_IP) {
            ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
        } else {
            ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
        }

        if (ret == RSC_FINAL) {
            /* Flush the cached segment first to preserve ordering. */
            if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
                /* Send failed */
                chain->stat.final_failed++;
                return 0;
            }

            /* Send current packet */
            return virtio_net_do_receive(nc, buf, size);
        } else if (ret == RSC_NO_MATCH) {
            continue;
        } else {
            /* Coalesced, mark coalesced flag to tell calc cksum for ipv4 */
            seg->is_coalesced = 1;
            return size;
        }
    }

    /* No matching flow: start a new cached segment for this one. */
    chain->stat.no_match_cache++;
    virtio_net_rsc_cache_buf(chain, nc, buf, size);
    return size;
}
2358 
2359 /* Drain a connection data, this is to avoid out of order segments */
2360 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
2361                                         NetClientState *nc,
2362                                         const uint8_t *buf, size_t size,
2363                                         uint16_t ip_start, uint16_t ip_size,
2364                                         uint16_t tcp_port)
2365 {
2366     VirtioNetRscSeg *seg, *nseg;
2367     uint32_t ppair1, ppair2;
2368 
2369     ppair1 = *(uint32_t *)(buf + tcp_port);
2370     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2371         ppair2 = *(uint32_t *)(seg->buf + tcp_port);
2372         if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
2373             || (ppair1 != ppair2)) {
2374             continue;
2375         }
2376         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2377             chain->stat.drain_failed++;
2378         }
2379 
2380         break;
2381     }
2382 
2383     return virtio_net_do_receive(nc, buf, size);
2384 }
2385 
/*
 * Decide whether an IPv4 packet is a candidate for coalescing.
 * Returns RSC_CANDIDATE, or RSC_BYPASS for anything RSC must not touch
 * (non-IPv4, IP options, non-TCP, fragments, ECN, bogus lengths).
 */
static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
                                            struct ip_header *ip,
                                            const uint8_t *buf, size_t size)
{
    uint16_t ip_len;

    /* Not an ipv4 packet */
    /* NOTE(review): this path reuses the ip_option counter rather than a
       dedicated one — looks intentional but worth confirming. */
    if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
        chain->stat.ip_option++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ip option */
    if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
        chain->stat.ip_option++;
        return RSC_BYPASS;
    }

    if (ip->ip_p != IPPROTO_TCP) {
        chain->stat.bypass_not_tcp++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ip fragment */
    if (!(htons(ip->ip_off) & IP_DF)) {
        chain->stat.ip_frag++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ecn flag */
    if (IPTOS_ECN(ip->ip_tos)) {
        chain->stat.ip_ecn++;
        return RSC_BYPASS;
    }

    /* Total length must cover both headers and fit inside the frame. */
    ip_len = htons(ip->ip_len);
    if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
        || ip_len > (size - chain->n->guest_hdr_len -
                     sizeof(struct eth_header))) {
        chain->stat.ip_hacked++;
        return RSC_BYPASS;
    }

    return RSC_CANDIDATE;
}
2431 
/*
 * RSC entry point for IPv4 packets: validate, then either bypass to the
 * normal receive path, drain the matching flow, or try to coalesce.
 */
static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
                                      NetClientState *nc,
                                      const uint8_t *buf, size_t size)
{
    int32_t ret;
    uint16_t hdr_len;
    VirtioNetRscUnit unit;

    hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;

    /* Too short to even contain eth + minimal IPv4 + TCP headers. */
    if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
        + sizeof(struct tcp_header))) {
        chain->stat.bypass_not_tcp++;
        return virtio_net_do_receive(nc, buf, size);
    }

    virtio_net_rsc_extract_unit4(chain, buf, &unit);
    if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
        != RSC_CANDIDATE) {
        return virtio_net_do_receive(nc, buf, size);
    }

    ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
    if (ret == RSC_BYPASS) {
        return virtio_net_do_receive(nc, buf, size);
    } else if (ret == RSC_FINAL) {
        /*
         * Flow key: +12 is the offset of the IPv4 source address field,
         * so ip_size covers saddr+daddr; tcp_port points at the start of
         * the TCP header where the 4-byte port pair lives.
         */
        return virtio_net_rsc_drain_flow(chain, nc, buf, size,
                ((hdr_len + sizeof(struct eth_header)) + 12),
                VIRTIO_NET_IP4_ADDR_SIZE,
                hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
    }

    return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
}
2466 
/*
 * Decide whether an IPv6 packet is a candidate for coalescing.
 * Returns RSC_CANDIDATE, or RSC_BYPASS for anything RSC must not touch
 * (non-IPv6, extension headers / non-TCP, bogus lengths, ECN).
 */
static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
                                            struct ip6_header *ip6,
                                            const uint8_t *buf, size_t size)
{
    uint16_t ip_len;

    /* NOTE(review): masks the host-order flow word with 0xF0 to reach the
       version nibble in the first header byte — assumes the union layout
       exposes that byte here; confirm against the ip6_header definition. */
    if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
        != IP_HEADER_VERSION_6) {
        return RSC_BYPASS;
    }

    /* Both option and protocol is checked in this */
    if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
        chain->stat.bypass_not_tcp++;
        return RSC_BYPASS;
    }

    /* IPv6 payload length excludes the IP header itself. */
    ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    if (ip_len < sizeof(struct tcp_header) ||
        ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
                  - sizeof(struct ip6_header))) {
        chain->stat.ip_hacked++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ecn flag */
    if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
        chain->stat.ip_ecn++;
        return RSC_BYPASS;
    }

    return RSC_CANDIDATE;
}
2500 
/*
 * RSC entry point for IPv6 packets: validate, then either bypass to the
 * normal receive path, drain the matching flow, or try to coalesce.
 */
static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
                                      const uint8_t *buf, size_t size)
{
    int32_t ret;
    uint16_t hdr_len;
    VirtioNetRscChain *chain;
    VirtioNetRscUnit unit;

    chain = opq;
    hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;

    /* Too short to even contain eth + IPv6 + TCP headers. */
    if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
        + sizeof(tcp_header))) {
        return virtio_net_do_receive(nc, buf, size);
    }

    virtio_net_rsc_extract_unit6(chain, buf, &unit);
    if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
                                                 unit.ip, buf, size)) {
        return virtio_net_do_receive(nc, buf, size);
    }

    ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
    if (ret == RSC_BYPASS) {
        return virtio_net_do_receive(nc, buf, size);
    } else if (ret == RSC_FINAL) {
        /*
         * Flow key: +8 is the offset of the IPv6 source address field
         * (4B ver/flow + 2B plen + 1B nxt + 1B hoplimit), so ip_size
         * covers src+dst; tcp_port points at the TCP header's port pair.
         */
        return virtio_net_rsc_drain_flow(chain, nc, buf, size,
                ((hdr_len + sizeof(struct eth_header)) + 8),
                VIRTIO_NET_IP6_ADDR_SIZE,
                hdr_len + sizeof(struct eth_header)
                + sizeof(struct ip6_header));
    }

    return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
}
2536 
2537 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
2538                                                       NetClientState *nc,
2539                                                       uint16_t proto)
2540 {
2541     VirtioNetRscChain *chain;
2542 
2543     if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
2544         return NULL;
2545     }
2546 
2547     QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
2548         if (chain->proto == proto) {
2549             return chain;
2550         }
2551     }
2552 
2553     chain = g_malloc(sizeof(*chain));
2554     chain->n = n;
2555     chain->proto = proto;
2556     if (proto == (uint16_t)ETH_P_IP) {
2557         chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
2558         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2559     } else {
2560         chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
2561         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2562     }
2563     chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2564                                       virtio_net_rsc_purge, chain);
2565     memset(&chain->stat, 0, sizeof(chain->stat));
2566 
2567     QTAILQ_INIT(&chain->buffers);
2568     QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
2569 
2570     return chain;
2571 }
2572 
2573 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
2574                                       const uint8_t *buf,
2575                                       size_t size)
2576 {
2577     uint16_t proto;
2578     VirtioNetRscChain *chain;
2579     struct eth_header *eth;
2580     VirtIONet *n;
2581 
2582     n = qemu_get_nic_opaque(nc);
2583     if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
2584         return virtio_net_do_receive(nc, buf, size);
2585     }
2586 
2587     eth = (struct eth_header *)(buf + n->guest_hdr_len);
2588     proto = htons(eth->h_proto);
2589 
2590     chain = virtio_net_rsc_lookup_chain(n, nc, proto);
2591     if (chain) {
2592         chain->stat.received++;
2593         if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
2594             return virtio_net_rsc_receive4(chain, nc, buf, size);
2595         } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
2596             return virtio_net_rsc_receive6(chain, nc, buf, size);
2597         }
2598     }
2599     return virtio_net_do_receive(nc, buf, size);
2600 }
2601 
2602 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
2603                                   size_t size)
2604 {
2605     VirtIONet *n = qemu_get_nic_opaque(nc);
2606     if ((n->rsc4_enabled || n->rsc6_enabled)) {
2607         return virtio_net_rsc_receive(nc, buf, size);
2608     } else {
2609         return virtio_net_do_receive(nc, buf, size);
2610     }
2611 }
2612 
2613 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
2614 
/*
 * Completion callback for an asynchronous TX submitted via
 * qemu_sendv_packet_async(): retire the in-flight element, then resume
 * flushing the TX queue that was blocked on it.
 */
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret;

    /* Return the completed element to the guest and notify it. */
    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    /* Re-enable kicks before flushing whatever queued up meanwhile. */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret >= n->tx_burst) {
        /*
         * the flush has been stopped by tx_burst
         * we will not receive notification for the
         * remainining part, so re-schedule
         */
        virtio_queue_set_notification(q->tx_vq, 0);
        if (q->tx_bh) {
            replay_bh_schedule_event(q->tx_bh);
        } else {
            timer_mod(q->tx_timer,
                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        }
        q->tx_waiting = 1;
    }
}
2646 
2647 /* TX */
/*
 * Flush pending TX buffers from @q to the network backend.
 *
 * Pops up to n->tx_burst elements from the TX virtqueue and hands each one
 * to the net layer via qemu_sendv_packet_async().
 *
 * Returns the number of packets flushed (>= 0), -EBUSY if the backend
 * cannot accept more data right now (the in-flight element is parked in
 * q->async_tx and virtio_net_tx_complete() resumes the flush later), or
 * -EINVAL if the guest supplied a malformed buffer and the device has been
 * marked broken via virtio_error().
 */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    /*
     * A previous packet is still in flight in the backend; stop kicking
     * until its completion callback restarts us.
     */
    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        /* sg2 has one extra slot for the byte-swapped header prepended below */
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr vhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            goto detach;
        }

        /*
         * Guest and host disagree on endianness: read the header out,
         * swap it, and rebuild the iovec with the swapped copy in front.
         */
        if (n->needs_vnet_hdr_swap) {
            if (iov_to_buf(out_sg, out_num, 0, &vhdr, sizeof(vhdr)) <
                sizeof(vhdr)) {
                virtio_error(vdev, "virtio-net header incorrect");
                goto detach;
            }
            virtio_net_hdr_swap(vdev, &vhdr);
            sg2[0].iov_base = &vhdr;
            sg2[0].iov_len = sizeof(vhdr);
            out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, out_sg, out_num,
                               sizeof(vhdr), -1);
            /* iov_copy filled every slot: payload too fragmented, drop it */
            if (out_num == VIRTQUEUE_MAX_SIZE) {
                goto drop;
            }
            out_num += 1;
            out_sg = sg2;
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            if (iov_size(out_sg, out_num) < n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header is invalid");
                goto detach;
            }
            /* First host_hdr_len bytes of header, then skip to the payload */
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                             out_sg, out_num,
                             n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;

            if (out_num < 1) {
                virtio_error(vdev, "virtio-net nothing to send");
                goto detach;
            }
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            /* Backend busy: park the element and wait for tx_complete */
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;

detach:
    /* Device is broken: return the element unused and bail out */
    virtqueue_detach_element(q->tx_vq, elem, 0);
    g_free(elem);
    return -EINVAL;
}
2749 
2750 static void virtio_net_tx_timer(void *opaque);
2751 
2752 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2753 {
2754     VirtIONet *n = VIRTIO_NET(vdev);
2755     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2756 
2757     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2758         virtio_net_drop_tx_queue_data(vdev, vq);
2759         return;
2760     }
2761 
2762     /* This happens when device was stopped but VCPU wasn't. */
2763     if (!vdev->vm_running) {
2764         q->tx_waiting = 1;
2765         return;
2766     }
2767 
2768     if (q->tx_waiting) {
2769         /* We already have queued packets, immediately flush */
2770         timer_del(q->tx_timer);
2771         virtio_net_tx_timer(q);
2772     } else {
2773         /* re-arm timer to flush it (and more) on next tick */
2774         timer_mod(q->tx_timer,
2775                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2776         q->tx_waiting = 1;
2777         virtio_queue_set_notification(vq, 0);
2778     }
2779 }
2780 
2781 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2782 {
2783     VirtIONet *n = VIRTIO_NET(vdev);
2784     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2785 
2786     if (unlikely(n->vhost_started)) {
2787         return;
2788     }
2789 
2790     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2791         virtio_net_drop_tx_queue_data(vdev, vq);
2792         return;
2793     }
2794 
2795     if (unlikely(q->tx_waiting)) {
2796         return;
2797     }
2798     q->tx_waiting = 1;
2799     /* This happens when device was stopped but VCPU wasn't. */
2800     if (!vdev->vm_running) {
2801         return;
2802     }
2803     virtio_queue_set_notification(vq, 0);
2804     replay_bh_schedule_event(q->tx_bh);
2805 }
2806 
/*
 * Timer callback for the "timer" TX strategy: flush the queue, then decide
 * whether to rearm the timer (full burst flushed or more work arrived) or
 * re-enable guest notifications (queue drained).
 */
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        /* -EBUSY: tx_complete resumes us; -EINVAL: device marked broken */
        return;
    }
    /*
     * If we flush a full burst of packets, assume there are
     * more coming and immediately rearm
     */
    if (ret >= n->tx_burst) {
        q->tx_waiting = 1;
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        return;
    }
    /*
     * If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and rearm
     */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        q->tx_waiting = 1;
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
    }
}
2856 
/*
 * Bottom-half callback for the "bh" TX strategy: flush the queue, then
 * either reschedule ourselves (full burst flushed or more work arrived)
 * or re-enable guest notifications (queue drained).
 */
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        replay_bh_schedule_event(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        /* Device became broken during the second flush; give up */
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        replay_bh_schedule_event(q->tx_bh);
        q->tx_waiting = 1;
    }
}
2905 
2906 static void virtio_net_add_queue(VirtIONet *n, int index)
2907 {
2908     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2909 
2910     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2911                                            virtio_net_handle_rx);
2912 
2913     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2914         n->vqs[index].tx_vq =
2915             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2916                              virtio_net_handle_tx_timer);
2917         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2918                                               virtio_net_tx_timer,
2919                                               &n->vqs[index]);
2920     } else {
2921         n->vqs[index].tx_vq =
2922             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2923                              virtio_net_handle_tx_bh);
2924         n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index],
2925                                                   &DEVICE(vdev)->mem_reentrancy_guard);
2926     }
2927 
2928     n->vqs[index].tx_waiting = 0;
2929     n->vqs[index].n = n;
2930 }
2931 
2932 static void virtio_net_del_queue(VirtIONet *n, int index)
2933 {
2934     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2935     VirtIONetQueue *q = &n->vqs[index];
2936     NetClientState *nc = qemu_get_subqueue(n->nic, index);
2937 
2938     qemu_purge_queued_packets(nc);
2939 
2940     virtio_del_queue(vdev, index * 2);
2941     if (q->tx_timer) {
2942         timer_free(q->tx_timer);
2943         q->tx_timer = NULL;
2944     } else {
2945         qemu_bh_delete(q->tx_bh);
2946         q->tx_bh = NULL;
2947     }
2948     q->tx_waiting = 0;
2949     virtio_del_queue(vdev, index * 2 + 1);
2950 }
2951 
/*
 * Grow or shrink the device to @new_max_queue_pairs RX/TX pairs.
 *
 * Queue layout is pairs at indices 0..2n-1 with the control queue last,
 * so the control queue must be deleted first and re-added last to keep
 * it at the highest index.
 */
static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queue_pairs * 2 + 1;
    int i;

    /* At least one queue pair plus the ctrl queue, and always an odd count */
    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}
2986 
2987 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
2988 {
2989     int max = multiqueue ? n->max_queue_pairs : 1;
2990 
2991     n->multiqueue = multiqueue;
2992     virtio_net_change_num_queue_pairs(n, max);
2993 
2994     virtio_net_set_queue_pairs(n);
2995 }
2996 
2997 static int virtio_net_pre_load_queues(VirtIODevice *vdev)
2998 {
2999     virtio_net_set_multiqueue(VIRTIO_NET(vdev),
3000                               virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_RSS) ||
3001                               virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MQ));
3002 
3003     return 0;
3004 }
3005 
/*
 * Return the feature set offered to the guest: host features masked by
 * what the peer (tap/vhost backend) can actually support.
 */
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    /* Without a vnet header the peer cannot do any offloads */
    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);

        virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!peer_has_uso(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
    }

    /* Without a vhost backend the emulated datapath supports the rest */
    if (!get_vhost_net(nc->peer)) {
        return features;
    }

    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
    }
    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    /*
     * Since GUEST_ANNOUNCE is emulated the feature bit could be set without
     * enabled. This happens in the vDPA case.
     *
     * Make sure the feature set is not incoherent, as the driver could refuse
     * to start.
     *
     * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
     * helping guest to notify the new location with vDPA devices that does not
     * support it.
     */
    if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
    }

    return features;
}
3078 
/*
 * Post-load hook for the device section: re-derive state that is not
 * migrated directly (header layout, link state per subqueue, first
 * multicast index) and sanity-limit values from the incoming stream.
 */
static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    trace_virtio_net_post_load_device();
    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1),
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_NET_F_HASH_REPORT));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    /*
     * curr_guest_offloads will be later overwritten by the
     * virtio_set_features_nocheck call done from the virtio_load.
     * Here we make sure it is preserved and restored accordingly
     * in the virtio_net_post_load_virtio callback.
     */
    n->saved_guest_offloads = n->curr_guest_offloads;

    virtio_net_set_queue_pairs(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        /* The multicast bit is the LSB of the first address byte */
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queue_pairs; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    /* Resume a self-announce sequence that was interrupted by migration */
    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
                                  QEMU_CLOCK_VIRTUAL,
                                  virtio_net_announce_timer, n);
        if (n->announce_timer.round) {
            timer_mod(n->announce_timer.tm,
                      qemu_clock_get_ms(n->announce_timer.type));
        } else {
            qemu_announce_timer_del(&n->announce_timer, false);
        }
    }

    virtio_net_commit_rss_config(n);
    return 0;
}
3142 
3143 static int virtio_net_post_load_virtio(VirtIODevice *vdev)
3144 {
3145     VirtIONet *n = VIRTIO_NET(vdev);
3146     /*
3147      * The actual needed state is now in saved_guest_offloads,
3148      * see virtio_net_post_load_device for detail.
3149      * Restore it back and apply the desired offloads.
3150      */
3151     n->curr_guest_offloads = n->saved_guest_offloads;
3152     if (peer_has_vnet_hdr(n)) {
3153         virtio_net_apply_guest_offloads(n);
3154     }
3155 
3156     return 0;
3157 }
3158 
/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name = "virtio-net-queue-tx_waiting",
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
   },
};
3167 
3168 static bool max_queue_pairs_gt_1(void *opaque, int version_id)
3169 {
3170     return VIRTIO_NET(opaque)->max_queue_pairs > 1;
3171 }
3172 
3173 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
3174 {
3175     return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
3176                                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3177 }
3178 
3179 static bool mac_table_fits(void *opaque, int version_id)
3180 {
3181     return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
3182 }
3183 
/* Inverse of mac_table_fits(); selects the discard path for oversized tables */
static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}
3188 
/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;        /* device whose state is being migrated */
    VirtIONetQueue *vqs_1;         /* &parent->vqs[1]: queues after the first */
    uint16_t        curr_queue_pairs_1; /* count of those extra queue pairs */
    uint8_t         has_ufo;       /* snapshot of parent->has_ufo */
    uint32_t        has_vnet_hdr;  /* snapshot of parent->has_vnet_hdr */
};
3199 
3200 /* The 2nd and subsequent tx_waiting flags are loaded later than
3201  * the 1st entry in the queue_pairs and only if there's more than one
3202  * entry.  We use the tmp mechanism to calculate a temporary
3203  * pointer and count and also validate the count.
3204  */
3205 
3206 static int virtio_net_tx_waiting_pre_save(void *opaque)
3207 {
3208     struct VirtIONetMigTmp *tmp = opaque;
3209 
3210     tmp->vqs_1 = tmp->parent->vqs + 1;
3211     tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1;
3212     if (tmp->parent->curr_queue_pairs == 0) {
3213         tmp->curr_queue_pairs_1 = 0;
3214     }
3215 
3216     return 0;
3217 }
3218 
3219 static int virtio_net_tx_waiting_pre_load(void *opaque)
3220 {
3221     struct VirtIONetMigTmp *tmp = opaque;
3222 
3223     /* Reuse the pointer setup from save */
3224     virtio_net_tx_waiting_pre_save(opaque);
3225 
3226     if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
3227         error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
3228             tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
3229 
3230         return -EINVAL;
3231     }
3232 
3233     return 0; /* all good */
3234 }
3235 
/* Migrates tx_waiting for queue pairs 1..N-1 via the tmp struct above */
static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (const VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                     curr_queue_pairs_1,
                                     vmstate_virtio_net_queue_tx_waiting,
                                     struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};
3248 
3249 /* the 'has_ufo' flag is just tested; if the incoming stream has the
3250  * flag set we need to check that we have it
3251  */
3252 static int virtio_net_ufo_post_load(void *opaque, int version_id)
3253 {
3254     struct VirtIONetMigTmp *tmp = opaque;
3255 
3256     if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
3257         error_report("virtio-net: saved image requires TUN_F_UFO support");
3258         return -EINVAL;
3259     }
3260 
3261     return 0;
3262 }
3263 
3264 static int virtio_net_ufo_pre_save(void *opaque)
3265 {
3266     struct VirtIONetMigTmp *tmp = opaque;
3267 
3268     tmp->has_ufo = tmp->parent->has_ufo;
3269 
3270     return 0;
3271 }
3272 
/* Migrates the has_ufo capability flag; checked against the peer on load */
static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name      = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save  = virtio_net_ufo_pre_save,
    .fields    = (const VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};
3282 
3283 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
3284  * flag set we need to check that we have it
3285  */
3286 static int virtio_net_vnet_post_load(void *opaque, int version_id)
3287 {
3288     struct VirtIONetMigTmp *tmp = opaque;
3289 
3290     if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
3291         error_report("virtio-net: saved image requires vnet_hdr=on");
3292         return -EINVAL;
3293     }
3294 
3295     return 0;
3296 }
3297 
3298 static int virtio_net_vnet_pre_save(void *opaque)
3299 {
3300     struct VirtIONetMigTmp *tmp = opaque;
3301 
3302     tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
3303 
3304     return 0;
3305 }
3306 
/* Migrates the has_vnet_hdr capability flag; checked against the peer on load */
static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (const VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};
3316 
3317 static bool virtio_net_rss_needed(void *opaque)
3318 {
3319     return VIRTIO_NET(opaque)->rss_data.enabled;
3320 }
3321 
/* Optional subsection carrying the guest-programmed RSS configuration */
static const VMStateDescription vmstate_virtio_net_rss = {
    .name      = "virtio-net-device/rss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virtio_net_rss_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(rss_data.enabled, VirtIONet),
        VMSTATE_BOOL(rss_data.redirect, VirtIONet),
        VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
        VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
        VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
        VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
        VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
                            VIRTIO_NET_RSS_MAX_KEY_SIZE),
        /* Indirection table is variable length; allocated on load */
        VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
                                    rss_data.indirections_len, 0,
                                    vmstate_info_uint16, uint16_t),
        VMSTATE_END_OF_LIST()
    },
};
3342 
3343 static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
3344 {
3345     VirtIONet *n = VIRTIO_NET(vdev);
3346     NetClientState *nc;
3347     struct vhost_net *net;
3348 
3349     if (!n->nic) {
3350         return NULL;
3351     }
3352 
3353     nc = qemu_get_queue(n->nic);
3354     if (!nc) {
3355         return NULL;
3356     }
3357 
3358     net = get_vhost_net(nc->peer);
3359     if (!net) {
3360         return NULL;
3361     }
3362 
3363     return &net->dev;
3364 }
3365 
3366 static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
3367                                      const VMStateField *field,
3368                                      JSONWriter *vmdesc)
3369 {
3370     VirtIONet *n = pv;
3371     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3372     struct vhost_dev *vhdev;
3373     Error *local_error = NULL;
3374     int ret;
3375 
3376     vhdev = virtio_net_get_vhost(vdev);
3377     if (vhdev == NULL) {
3378         error_reportf_err(local_error,
3379                           "Error getting vhost back-end of %s device %s: ",
3380                           vdev->name, vdev->parent_obj.canonical_path);
3381         return -1;
3382     }
3383 
3384     ret = vhost_save_backend_state(vhdev, f, &local_error);
3385     if (ret < 0) {
3386         error_reportf_err(local_error,
3387                           "Error saving back-end state of %s device %s: ",
3388                           vdev->name, vdev->parent_obj.canonical_path);
3389         return ret;
3390     }
3391 
3392     return 0;
3393 }
3394 
3395 static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
3396                                      const VMStateField *field)
3397 {
3398     VirtIONet *n = pv;
3399     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3400     struct vhost_dev *vhdev;
3401     Error *local_error = NULL;
3402     int ret;
3403 
3404     vhdev = virtio_net_get_vhost(vdev);
3405     if (vhdev == NULL) {
3406         error_reportf_err(local_error,
3407                           "Error getting vhost back-end of %s device %s: ",
3408                           vdev->name, vdev->parent_obj.canonical_path);
3409         return -1;
3410     }
3411 
3412     ret = vhost_load_backend_state(vhdev, f, &local_error);
3413     if (ret < 0) {
3414         error_reportf_err(local_error,
3415                           "Error loading  back-end state of %s device %s: ",
3416                           vdev->name, vdev->parent_obj.canonical_path);
3417         return ret;
3418     }
3419 
3420     return 0;
3421 }
3422 
3423 static bool vhost_user_net_is_internal_migration(void *opaque)
3424 {
3425     VirtIONet *n = opaque;
3426     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3427     struct vhost_dev *vhdev;
3428 
3429     vhdev = virtio_net_get_vhost(vdev);
3430     if (vhdev == NULL) {
3431         return false;
3432     }
3433 
3434     return vhost_supports_device_state(vhdev);
3435 }
3436 
/* Optional subsection transporting opaque vhost-user back-end state */
static const VMStateDescription vhost_user_net_backend_state = {
    .name = "virtio-net-device/backend",
    .version_id = 0,
    .needed = vhost_user_net_is_internal_migration,
    .fields = (const VMStateField[]) {
        {
            .name = "backend",
            .info = &(const VMStateInfo) {
                .name = "virtio-net vhost-user backend state",
                .get = vhost_user_net_load_state,
                .put = vhost_user_net_save_state,
            },
         },
         VMSTATE_END_OF_LIST()
    }
};
3453 
/* Main migration description of the virtio-net device-specific state */
static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        /* tx_waiting of queue pair 0; the rest comes via the tmp section */
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table.; post-load
         *  sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        /* Queue-pair counts are only on the wire for multiqueue devices */
        VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_net_rss,
        &vhost_user_net_backend_state,
        NULL
    }
};
3510 
/* Callbacks registered with the net layer for the emulated NIC */
static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
    .announce = virtio_net_announce,
};
3520 
/*
 * Return whether a guest notification is pending for virtqueue @idx
 * (or for the config interrupt when idx == VIRTIO_CONFIG_IRQ_IDX)
 * while vhost is running.
 */
static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;
    assert(n->vhost_started);
    if (!n->multiqueue && idx == 2) {
        /* Must guard against invalid features and bogus queue index
         * from being set by malicious guest, or penetrated through
         * buggy migration stream.
         */
        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bogus vq index ignored\n", __func__);
            return false;
        }
        /* idx 2 is the control queue when single-queue */
        nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
    } else {
        nc = qemu_get_subqueue(n->nic, vq2q(idx));
    }
    /*
     * Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
     * as the macro of configure interrupt's IDX, If this driver does not
     * support, the function will return false
     */

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return vhost_net_config_pending(get_vhost_net(nc->peer));
    }
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}
3551 
/*
 * Mask or unmask guest notifications for virtqueue @idx (or for the
 * config interrupt when idx == VIRTIO_CONFIG_IRQ_IDX) while vhost is
 * running.
 */
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;
    assert(n->vhost_started);
    if (!n->multiqueue && idx == 2) {
        /* Must guard against invalid features and bogus queue index
         * from being set by malicious guest, or penetrated through
         * buggy migration stream.
         */
        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bogus vq index ignored\n", __func__);
            return;
        }
        /* idx 2 is the control queue when single-queue */
        nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
    } else {
        nc = qemu_get_subqueue(n->nic, vq2q(idx));
    }
    /*
     * Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
     * as the macro of configure interrupt's IDX, If this driver does not
     * support, the function will return
     */

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
        return;
    }
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
}
3584 
3585 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
3586 {
3587     virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
3588 
3589     n->config_size = virtio_get_config_size(&cfg_size_params, host_features);
3590 }
3591 
3592 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
3593                                    const char *type)
3594 {
3595     /*
3596      * The name can be NULL, the netclient name will be type.x.
3597      */
3598     assert(type != NULL);
3599 
3600     g_free(n->netclient_name);
3601     g_free(n->netclient_type);
3602     n->netclient_name = g_strdup(name);
3603     n->netclient_type = g_strdup(type);
3604 }
3605 
3606 static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev)
3607 {
3608     HotplugHandler *hotplug_ctrl;
3609     PCIDevice *pci_dev;
3610     Error *err = NULL;
3611 
3612     hotplug_ctrl = qdev_get_hotplug_handler(dev);
3613     if (hotplug_ctrl) {
3614         pci_dev = PCI_DEVICE(dev);
3615         pci_dev->partially_hotplugged = true;
3616         hotplug_handler_unplug_request(hotplug_ctrl, dev, &err);
3617         if (err) {
3618             error_report_err(err);
3619             return false;
3620         }
3621     } else {
3622         return false;
3623     }
3624     return true;
3625 }
3626 
/*
 * Re-plug a failover primary device that was partially unplugged for a
 * migration that did not complete.  Returns true on success (or when no
 * replug is needed), false with @errp set otherwise.
 */
static bool failover_replug_primary(VirtIONet *n, DeviceState *dev,
                                    Error **errp)
{
    Error *err = NULL;
    HotplugHandler *hotplug_ctrl;
    PCIDevice *pdev = PCI_DEVICE(dev);
    BusState *primary_bus;

    /* Nothing to do unless the device was partially unplugged. */
    if (!pdev->partially_hotplugged) {
        return true;
    }
    primary_bus = dev->parent_bus;
    if (!primary_bus) {
        error_setg(errp, "virtio_net: couldn't find primary bus");
        return false;
    }
    /* Re-attach to its bus and stop hiding the device from the guest. */
    qdev_set_parent_bus(dev, primary_bus, &error_abort);
    qatomic_set(&n->failover_primary_hidden, false);
    hotplug_ctrl = qdev_get_hotplug_handler(dev);
    if (hotplug_ctrl) {
        hotplug_handler_pre_plug(hotplug_ctrl, dev, &err);
        if (err) {
            goto out;
        }
        hotplug_handler_plug(hotplug_ctrl, dev, &err);
    }
    /* NOTE(review): also cleared when plug above set @err — confirm intended */
    pdev->partially_hotplugged = false;

out:
    error_propagate(errp, err);
    return !err;
}
3659 
3660 static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationEvent *e)
3661 {
3662     bool should_be_hidden;
3663     Error *err = NULL;
3664     DeviceState *dev = failover_find_primary_device(n);
3665 
3666     if (!dev) {
3667         return;
3668     }
3669 
3670     should_be_hidden = qatomic_read(&n->failover_primary_hidden);
3671 
3672     if (e->type == MIG_EVENT_PRECOPY_SETUP && !should_be_hidden) {
3673         if (failover_unplug_primary(n, dev)) {
3674             vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev);
3675             qapi_event_send_unplug_primary(dev->id);
3676             qatomic_set(&n->failover_primary_hidden, true);
3677         } else {
3678             warn_report("couldn't unplug primary device");
3679         }
3680     } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
3681         /* We already unplugged the device let's plug it back */
3682         if (!failover_replug_primary(n, dev, &err)) {
3683             if (err) {
3684                 error_report_err(err);
3685             }
3686         }
3687     }
3688 }
3689 
3690 static int virtio_net_migration_state_notifier(NotifierWithReturn *notifier,
3691                                                MigrationEvent *e, Error **errp)
3692 {
3693     VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
3694     virtio_net_handle_migration_primary(n, e);
3695     return 0;
3696 }
3697 
/*
 * DeviceListener hook invoked when a device carrying a failover_pair_id
 * is being created.  Returns true when that device is the failover
 * primary for this NIC and must (still) be hidden from the guest,
 * i.e. until VIRTIO_NET_F_STANDBY has been negotiated.
 */
static bool failover_hide_primary_device(DeviceListener *listener,
                                         const QDict *device_opts,
                                         bool from_json,
                                         Error **errp)
{
    VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
    const char *standby_id;

    if (!device_opts) {
        return false;
    }

    /* Devices without a failover pair are never hidden. */
    if (!qdict_haskey(device_opts, "failover_pair_id")) {
        return false;
    }

    if (!qdict_haskey(device_opts, "id")) {
        error_setg(errp, "Device with failover_pair_id needs to have id");
        return false;
    }

    /* Only handle primaries paired with this particular NIC. */
    standby_id = qdict_get_str(device_opts, "failover_pair_id");
    if (g_strcmp0(standby_id, n->netclient_name) != 0) {
        return false;
    }

    /*
     * The hide helper can be called several times for a given device.
     * Check there is only one primary for a virtio-net device but
     * don't duplicate the qdict several times if it's called for the same
     * device.
     */
    if (n->primary_opts) {
        const char *old, *new;
        /* devices with failover_pair_id always have an id */
        old = qdict_get_str(n->primary_opts, "id");
        new = qdict_get_str(device_opts, "id");
        if (strcmp(old, new) != 0) {
            error_setg(errp, "Cannot attach more than one primary device to "
                       "'%s': '%s' and '%s'", n->netclient_name, old, new);
            return false;
        }
    } else {
        n->primary_opts = qdict_clone_shallow(device_opts);
        n->primary_opts_from_json = from_json;
    }

    /* failover_primary_hidden is set during feature negotiation */
    return qatomic_read(&n->failover_primary_hidden);
}
3748 
/*
 * Realize the virtio-net device: validate user configuration, derive
 * the host feature set, create the virtqueues and NIC backend, and
 * initialise runtime state.  On failure, sets @errp; after virtio_init()
 * has run, failures unwind via virtio_cleanup().
 */
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    /* A user-specified host MTU implies offering VIRTIO_NET_F_MTU. */
    if (n->net_conf.mtu) {
        n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    if (n->net_conf.duplex_str) {
        if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
            n->net_conf.duplex = DUPLEX_HALF;
        } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
            n->net_conf.duplex = DUPLEX_FULL;
        } else {
            error_setg(errp, "'duplex' must be 'half' or 'full'");
            return;
        }
        n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
    } else {
        n->net_conf.duplex = DUPLEX_UNKNOWN;
    }

    /* The property defaults to SPEED_UNKNOWN; anything below is invalid. */
    if (n->net_conf.speed < SPEED_UNKNOWN) {
        error_setg(errp, "'speed' must be between 0 and INT_MAX");
        return;
    }
    if (n->net_conf.speed >= 0) {
        n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
    }

    if (n->failover) {
        /* Hide the primary until VIRTIO_NET_F_STANDBY is negotiated. */
        n->primary_listener.hide_device = failover_hide_primary_device;
        qatomic_set(&n->failover_primary_hidden, true);
        device_listener_register(&n->primary_listener);
        migration_add_notifier(&n->migration_state,
                               virtio_net_migration_state_notifier);
        n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.rx_queue_size)) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > virtio_net_max_tx_queue_size(n) ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d",
                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
                   virtio_net_max_tx_queue_size(n));
        virtio_cleanup(vdev);
        return;
    }

    n->max_ncs = MAX(n->nic_conf.peers.queues, 1);

    /*
     * Figure out the datapath queue pairs since the backend could
     * provide control queue via peers as well.
     */
    if (n->nic_conf.peers.queues) {
        for (i = 0; i < n->max_ncs; i++) {
            if (n->nic_conf.peers.ncs[i]->is_datapath) {
                ++n->max_queue_pairs;
            }
        }
    }
    n->max_queue_pairs = MAX(n->max_queue_pairs, 1);

    /* Each pair needs an rx and a tx vq, plus one shared control vq. */
    if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_new0(VirtIONetQueue, n->max_queue_pairs);
    n->curr_queue_pairs = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        warn_report("virtio-net: "
                    "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                    n->net_conf.tx);
        error_printf("Defaulting to \"bh\"");
    }

    n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
                                    n->net_conf.tx_queue_size);

    /* Queue pair 0 always exists; further pairs are added on demand. */
    virtio_net_add_queue(n, 0);

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
                              QEMU_CLOCK_VIRTUAL,
                              virtio_net_announce_timer, n);
    n->announce_timer.round = 0;

    if (n->netclient_type) {
        /*
         * Happen when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name,
                              &dev->mem_reentrancy_guard, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id,
                              &dev->mem_reentrancy_guard, n);
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        n->nic->ncs[i].do_not_pad = true;
    }

    /* Host vnet header length depends on backend vnet-header support. */
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

   if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        /* vhost-vdpa backends need the MAC pushed into their config. */
        struct virtio_net_config netcfg = {};
        memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
        vhost_net_set_config(get_vhost_net(nc->peer),
            (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_FRONTEND);
    }
    QTAILQ_INIT(&n->rsc_chains);
    n->qdev = dev;

    net_rx_pkt_init(&n->rx_pkt);

    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
        virtio_net_load_ebpf(n, errp);
    }
}
3922 
/*
 * Unrealize: tear down in (roughly) reverse order of realize.  Any
 * vhost backend is stopped first so nothing is still writing to the
 * state being freed.
 */
static void virtio_net_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queue_pairs;

    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
        virtio_net_unload_ebpf(n);
    }

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    if (n->failover) {
        qobject_unref(n->primary_opts);
        device_listener_unregister(&n->primary_listener);
        migration_remove_notifier(&n->migration_state);
    } else {
        assert(n->primary_opts == NULL);
    }

    /* Only queue pair 0 exists unless multiqueue was negotiated. */
    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    for (i = 0; i < max_queue_pairs; i++) {
        virtio_net_del_queue(n, i);
    }
    /* delete also control vq */
    virtio_del_queue(vdev, max_queue_pairs * 2);
    qemu_announce_timer_del(&n->announce_timer, false);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_net_rsc_cleanup(n);
    g_free(n->rss_data.indirections_table);
    net_rx_pkt_uninit(n->rx_pkt);
    virtio_cleanup(vdev);
}
3966 
/*
 * Device reset: return the rx-filter, announce and multiqueue state to
 * their post-boot defaults and drop any queued TX packets.
 */
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queue_pairs = 1;
    timer_del(n->announce_timer.tm);
    n->announce_timer.round = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    /* Restore the configured MAC address in case the guest changed it. */
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);

    /* Flush any async TX */
    for (i = 0;  i < n->max_queue_pairs; i++) {
        flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
    }

    virtio_net_disable_rss(n);
}
4002 
/*
 * Per-instance init: set the default config size, expose the bootindex
 * property and prepare the eBPF RSS context.
 */
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n));

    ebpf_rss_init(&n->ebpf_rss);
}
4018 
4019 static int virtio_net_pre_save(void *opaque)
4020 {
4021     VirtIONet *n = opaque;
4022 
4023     /* At this point, backend must be stopped, otherwise
4024      * it might keep writing to memory. */
4025     assert(!n->vhost_started);
4026 
4027     return 0;
4028 }
4029 
4030 static bool primary_unplug_pending(void *opaque)
4031 {
4032     DeviceState *dev = opaque;
4033     DeviceState *primary;
4034     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
4035     VirtIONet *n = VIRTIO_NET(vdev);
4036 
4037     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4038         return false;
4039     }
4040     primary = failover_find_primary_device(n);
4041     return primary ? primary->pending_deleted_event : false;
4042 }
4043 
4044 static bool dev_unplug_pending(void *opaque)
4045 {
4046     DeviceState *dev = opaque;
4047     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
4048 
4049     return vdc->primary_unplug_pending(dev);
4050 }
4051 
/*
 * Top-level migration descriptor; the actual device state lives in
 * the virtio-core-driven vmsd (see vdc->vmsd in class_init).
 */
static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
    .dev_unplug_pending = dev_unplug_pending,
};
4063 
/*
 * User-configurable properties.  The *_BIT64 entries toggle individual
 * bits in host_features; the rest configure queue sizes, timers and
 * link parameters.
 */
static const Property virtio_net_properties[] = {
    /* Checksum and segmentation offload feature bits */
    DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
                    VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT64("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    /* Control virtqueue feature bits */
    DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    /* Multiqueue, RSS and receive coalescing (off by default) */
    DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
                    VIRTIO_NET_F_RSS, false),
    DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
                    VIRTIO_NET_F_HASH_REPORT, false),
    DEFINE_PROP_ARRAY("ebpf-rss-fds", VirtIONet, nr_ebpf_rss_fds,
                      ebpf_rss_fds, qdev_prop_string, char*),
    DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
                    VIRTIO_NET_F_RSC_EXT, false),
    DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
                       VIRTIO_NET_RSC_DEFAULT_INTERVAL),
    /* NIC, queue and link configuration */
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
    DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
    /* Failover (standby/primary) support */
    DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
    DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_USO4, true),
    DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_USO6, true),
    DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_USO, true),
};
4137 
/*
 * Class init: wire the device-level properties/vmsd and all
 * VirtioDeviceClass callbacks for virtio-net.
 */
static void virtio_net_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_net_properties);
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->queue_reset = virtio_net_queue_reset;
    vdc->queue_enable = virtio_net_queue_enable;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    /* GSO was a legacy (pre-virtio-1.0) feature bit. */
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->pre_load_queues = virtio_net_pre_load_queues;
    vdc->post_load = virtio_net_post_load_virtio;
    vdc->vmsd = &vmstate_virtio_net_device;
    vdc->primary_unplug_pending = primary_unplug_pending;
    vdc->get_vhost = virtio_net_get_vhost;
    vdc->toggle_device_iotlb = vhost_toggle_device_iotlb;
}
4167 
/* QOM type registration record for TYPE_VIRTIO_NET. */
static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};
4175 
/* Register the virtio-net QOM type at module-init time. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)
4182