xref: /openbmc/qemu/hw/net/virtio-net.c (revision 749c21cf6d8ff8eff094137c4ca1298c8f714066)
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/iov.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/option.h"
#include "qemu/option_int.h"
#include "qemu/config-file.h"
#include "qobject/qdict.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/announce.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-events-net.h"
#include "hw/qdev-properties.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"
#include "standard-headers/linux/ethtool.h"
#include "system/system.h"
#include "system/replay.h"
#include "trace.h"
#include "monitor/qdev.h"
#include "monitor/monitor.h"
#include "hw/pci/pci_device.h"
#include "net_rx_pkt.h"
#include "hw/virtio/vhost.h"
#include "system/qtest.h"

#define VIRTIO_NET_VM_VERSION    11

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

/* for now, only allow larger queue sizes; with virtio-1, the guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE

#define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */

#define VIRTIO_NET_TCP_FLAG         0x3F
#define VIRTIO_NET_TCP_HDR_LENGTH   0xF000

/* IPv4 max payload, 16 bits in the header */
#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535

/* header length value in the IP header, without options */
#define VIRTIO_NET_IP4_HEADER_LENGTH 5

#define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD

/* Purge coalesced packets timer interval. This value affects performance
   a lot and should be tuned carefully: '300000' (300us) is the recommended
   value to pass the WHQL test, while '50000' can gain 2x netperf throughput
   with tso/gso/gro 'off'. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000

#define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
                                         VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
                                         VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
                                         VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
                                         VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
                                         VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
                                         VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
                                         VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
                                         VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)

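/*
 * Each entry below extends the config space the guest may access up to the
 * given 'end' offset within struct virtio_net_config when the corresponding
 * feature bit(s) are offered.
 */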
static const VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1ULL << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
     .end = endof(struct virtio_net_config, duplex)},
    {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
     .end = endof(struct virtio_net_config, supported_hash_types)},
    {}
};

static const VirtIOConfigSizeParams cfg_size_params = {
    .min_size = endof(struct virtio_net_config, mac),
    .max_size = sizeof(struct virtio_net_config),
    .feature_sizes = feature_sizes
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

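/* RX and TX virtqueues alternate, so virtqueue index / 2 gives the queue pair. */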
static int vq2q(int queue_index)
{
    return queue_index / 2;
}

static void flush_or_purge_queued_packets(NetClientState *nc)
{
    if (!nc->peer) {
        return;
    }

    qemu_flush_or_purge_queued_packets(nc->peer, true);
    assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;
    NetClientState *nc = qemu_get_queue(n->nic);
    static const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };

    int ret = 0;
    memset(&netcfg, 0, sizeof(struct virtio_net_config));
    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
    netcfg.duplex = n->net_conf.duplex;
    netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
    virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
                 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
                 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
    virtio_stl_p(vdev, &netcfg.supported_hash_types,
                 n->rss_data.supported_hash_types);
    memcpy(config, &netcfg, n->config_size);

    /*
     * Is this VDPA? No peer means not VDPA: there's no way to
     * disconnect/reconnect a VDPA peer.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg,
                                   n->config_size);
        if (ret == -1) {
            return;
        }

        /*
         * Some NIC/kernel combinations present 0 as the mac address.  As that
         * is not a legal address, try to proceed with the address from the
         * QEMU command line in the hope that the address has been configured
         * correctly elsewhere - just not reported by the device.
         */
        if (memcmp(&netcfg.mac, &zero, sizeof(zero)) == 0) {
            info_report("Zero hardware mac address detected. Ignoring.");
            memcpy(netcfg.mac, n->mac, ETH_ALEN);
        }

        netcfg.status |= virtio_tswap16(vdev,
                                        n->status & VIRTIO_NET_S_ANNOUNCE);
        memcpy(config, &netcfg, n->config_size);
    }
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};
    NetClientState *nc = qemu_get_queue(n->nic);

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }

    /*
     * Is this VDPA? No peer means not VDPA: there's no way to
     * disconnect/reconnect a VDPA peer.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        vhost_net_set_config(get_vhost_net(nc->peer),
                             (uint8_t *)&netcfg, 0, n->config_size,
                             VHOST_SET_CONFIG_TYPE_FRONTEND);
    }
}

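/* True when the driver has set DRIVER_OK, the link is up and the VM is running. */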
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_notify(VirtIONet *net)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(net);
    trace_virtio_net_announce_notify();

    net->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    trace_virtio_net_announce_timer(n->announce_timer.round);

    n->announce_timer.round--;
    virtio_net_announce_notify(n);
}

static void virtio_net_announce(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /*
     * Make sure the virtio migration announcement timer isn't running.
     * If it is, let it trigger the announcement so that we do not cause
     * confusion.
     */
    if (n->announce_timer.round) {
        return;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        virtio_net_announce_notify(n);
    }
}

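/*
 * Start or stop the vhost backend so that it tracks the device status:
 * vhost runs only while the guest driver is ready and the link is up.
 */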
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
              n->max_ncs - n->max_queue_pairs : 0;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queue_pairs; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%u-byte MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queue_pairs, bool enable)
{
    int i;

    for (i = 0; i < queue_pairs; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back to fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = n->has_vnet_hdr &&
                                 virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queue_pairs, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

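/*
 * Called on every device status write: update the vnet header endianness
 * and vhost state, then start or stop per-queue TX processing (timer or
 * bottom half) accordingly.
 */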
static int virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queue_pairs; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                replay_bh_schedule_event(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                vdev->vm_running) {
                /* If tx is waiting, we likely have some packets in the tx
                 * queue and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
    return 0;
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down) {
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    } else {
        n->status |= VIRTIO_NET_S_LINK_UP;
    }

    if (n->status != old_status) {
        virtio_notify_config(vdev);
    }

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        char *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(n->netclient_name, path);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

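/* Build a QAPI intList of all VLAN ids currently passed by the VLAN filter. */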
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                QAPI_LIST_PREPEND(list, (i << 5) + j);
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = !n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        QAPI_LIST_PREPEND(str_list,
                      qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        QAPI_LIST_PREPEND(str_list,
                      qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;

    /* validate queue_index and skip for cvq */
    if (queue_index >= n->max_queue_pairs * 2) {
        return;
    }

    nc = qemu_get_subqueue(n->nic, vq2q(queue_index));

    if (!nc->peer) {
        return;
    }

    if (get_vhost_net(nc->peer) &&
        nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
        vhost_net_virtqueue_reset(vdev, nc, queue_index);
    }

    flush_or_purge_queued_packets(nc);
}

static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;
    int r;

    /* validate queue_index and skip for cvq */
    if (queue_index >= n->max_queue_pairs * 2) {
        return;
    }

    nc = qemu_get_subqueue(n->nic, vq2q(queue_index));

    if (!nc->peer || !vdev->vhost_started) {
        return;
    }

    if (get_vhost_net(nc->peer) &&
        nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
        r = vhost_net_virtqueue_restart(vdev, nc, queue_index);
        if (r < 0) {
            error_report("unable to restart vhost net virtqueue: %d, "
                         "when resetting the queue", queue_index);
        }
    }
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static int peer_has_uso(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    return qemu_has_uso(qemu_get_queue(n->nic)->peer);
}

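/*
 * Pick the vnet header layout the guest will use and, when the peer
 * supports that length, make the backend use the same one so headers
 * can be passed through unmodified.
 */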
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1, int hash_report)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = hash_report ?
            sizeof(struct virtio_net_hdr_v1_hash) :
            sizeof(struct virtio_net_hdr_mrg_rxbuf);
        n->rss_data.populate_hash = !!hash_report;
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
        n->rss_data.populate_hash = false;
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user or vhost-vdpa don't support max queue
     * size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    switch (peer->info->type) {
    case NET_CLIENT_DRIVER_VHOST_USER:
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        return VIRTQUEUE_MAX_SIZE;
    default:
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queue_pairs == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queue_pairs(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        if (i < n->curr_queue_pairs) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO6)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO)  |
        (1ULL << VIRTIO_NET_F_GUEST_USO4) |
        (1ULL << VIRTIO_NET_F_GUEST_USO6);

    return guest_offloads_mask & features;
}

uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

typedef struct {
    VirtIONet *n;
    DeviceState *dev;
} FailoverDevice;

/**
 * Set the failover primary device
 *
 * @dev: the device currently being walked
 * @opaque: FailoverDevice to fill in with the matching primary
 *
 * Returns 1 (stopping the walk) when the primary device is found,
 * 0 to keep walking.
 */
static int failover_set_primary(DeviceState *dev, void *opaque)
{
    FailoverDevice *fdev = opaque;
    PCIDevice *pci_dev = (PCIDevice *)
        object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE);

    if (!pci_dev) {
        return 0;
    }

    if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) {
        fdev->dev = dev;
        return 1;
    }

    return 0;
}

/**
 * Find the primary device for this failover virtio-net
 *
 * @n: VirtIONet device
 */
static DeviceState *failover_find_primary_device(VirtIONet *n)
{
    FailoverDevice fdev = {
        .n = n,
    };

    qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL,
                       NULL, NULL, &fdev);
    return fdev.dev;
}

static void failover_add_primary(VirtIONet *n, Error **errp)
{
    Error *err = NULL;
    DeviceState *dev = failover_find_primary_device(n);

    if (dev) {
        return;
    }

    if (!n->primary_opts) {
        error_setg(errp, "Primary device not found");
        error_append_hint(errp, "Virtio-net failover will not work. Make "
                          "sure primary device has parameter"
                          " failover_pair_id=%s\n", n->netclient_name);
        return;
    }

    dev = qdev_device_add_from_qdict(n->primary_opts,
                                     n->primary_opts_from_json,
                                     &err);
    if (err) {
        qobject_unref(n->primary_opts);
        n->primary_opts = NULL;
    } else {
        object_unref(OBJECT(dev));
    }
    error_propagate(errp, err);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    Error *err = NULL;
    int i;

    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1),
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_HASH_REPORT));

    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
    n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);

        /*
         * keep acked_features in NetVhostUserState up-to-date so it
         * can't miss any features configured by guest virtio driver.
         */
        vhost_net_save_acked_features(nc->peer);
    }

    if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
        qapi_event_send_failover_negotiated(n->netclient_name);
        qatomic_set(&n->failover_primary_hidden, false);
        failover_add_primary(n, &err);
        if (err) {
            if (!qtest_enabled()) {
                warn_report_err(err);
            } else {
                error_free(err);
            }
        }
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        offloads = virtio_ldq_p(vdev, &offloads);

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
        n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

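/*
 * VIRTIO_NET_CTRL_MAC: either set the primary MAC address, or replace the
 * whole filter table (a run of unicast entries followed by multicast ones).
 */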
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    } else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    trace_virtio_net_handle_announce(n->announce_timer.round);
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_timer.round) {
            qemu_announce_timer_step(&n->announce_timer);
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
{
    NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
    if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
        return false;
    }

    trace_virtio_net_rss_attach_ebpf(nic, prog_fd);
    return nc->info->set_steering_ebpf(nc, prog_fd);
}

static void rss_data_to_rss_config(struct VirtioNetRssData *data,
                                   struct EBPFRSSConfig *config)
{
    config->redirect = data->redirect;
    config->populate_hash = data->populate_hash;
    config->hash_types = data->runtime_hash_types;
    config->indirections_len = data->indirections_len;
    config->default_queue = data->default_queue;
}

static bool virtio_net_attach_ebpf_rss(VirtIONet *n)
{
    struct EBPFRSSConfig config = {};

    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
        return false;
    }

    rss_data_to_rss_config(&n->rss_data, &config);

    if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
                          n->rss_data.indirections_table, n->rss_data.key,
                          NULL)) {
        return false;
    }

    if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
        return false;
    }

    return true;
}

static void virtio_net_detach_ebpf_rss(VirtIONet *n)
{
    virtio_net_attach_ebpf_to_backend(n->nic, -1);
}

static void virtio_net_commit_rss_config(VirtIONet *n)
{
    if (n->rss_data.peer_hash_available) {
        return;
    }

    if (n->rss_data.enabled) {
        n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
        if (n->rss_data.populate_hash) {
            virtio_net_detach_ebpf_rss(n);
        } else if (!virtio_net_attach_ebpf_rss(n)) {
            if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
                warn_report("Can't load eBPF RSS for vhost");
            } else {
                warn_report("Can't load eBPF RSS - falling back to software RSS");
                n->rss_data.enabled_software_rss = true;
            }
        }

        trace_virtio_net_rss_enable(n,
                                    n->rss_data.runtime_hash_types,
                                    n->rss_data.indirections_len,
                                    sizeof(n->rss_data.key));
    } else {
        virtio_net_detach_ebpf_rss(n);
        trace_virtio_net_rss_disable(n);
    }
}

static void virtio_net_disable_rss(VirtIONet *n)
{
    if (!n->rss_data.enabled) {
        return;
    }

    n->rss_data.enabled = false;
    virtio_net_commit_rss_config(n);
}

static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp)
{
    int fds[EBPF_RSS_MAX_FDS] = { [0 ... EBPF_RSS_MAX_FDS - 1] = -1};
    bool ret = true;
    int i = 0;

    if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) {
        error_setg(errp, "Expected %d file descriptors but got %d",
                   EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
        return false;
    }

    for (i = 0; i < n->nr_ebpf_rss_fds; i++) {
        fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], errp);
        if (fds[i] < 0) {
            ret = false;
            goto exit;
        }
    }

    ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3], errp);

exit:
    if (!ret) {
        for (i = 0; i < n->nr_ebpf_rss_fds && fds[i] != -1; i++) {
            close(fds[i]);
        }
    }

    return ret;
}

static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp)
{
    if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
        return true;
    }

    trace_virtio_net_rss_load(n, n->nr_ebpf_rss_fds, n->ebpf_rss_fds);

    /*
     * If the user explicitly gave QEMU RSS FDs to use, then
     * failing to use them must be considered a fatal
     * error. If no RSS FDs were provided, QEMU is trying
     * eBPF on a "best effort" basis only, so report a
     * warning and allow fallback to software RSS.
     */
    if (n->ebpf_rss_fds) {
        return virtio_net_load_ebpf_fds(n, errp);
    }

    ebpf_rss_load(&n->ebpf_rss, &error_warn);
    return true;
}

static void virtio_net_unload_ebpf(VirtIONet *n)
{
    virtio_net_attach_ebpf_to_backend(n->nic, -1);
    ebpf_rss_unload(&n->ebpf_rss);
}

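/*
 * Parse a VIRTIO_NET_CTRL_MQ_RSS_CONFIG / _HASH_CONFIG command buffer.
 * Returns the number of queue pairs on success, 0 on error.
 */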
static uint16_t virtio_net_handle_rss(VirtIONet *n,
                                      struct iovec *iov,
                                      unsigned int iov_cnt,
                                      bool do_rss)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_rss_config cfg;
    size_t s, offset = 0, size_get;
    uint16_t queue_pairs, i;
    struct {
        uint16_t us;
        uint8_t b;
    } QEMU_PACKED temp;
    const char *err_msg = "";
    uint32_t err_value = 0;

    if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
        err_msg = "RSS is not negotiated";
        goto error;
    }
    if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
        err_msg = "Hash report is not negotiated";
        goto error;
    }
    size_get = offsetof(struct virtio_net_rss_config, indirection_table);
    s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
    if (s != size_get) {
        err_msg = "Short command buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    n->rss_data.runtime_hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
    n->rss_data.indirections_len =
        virtio_lduw_p(vdev, &cfg.indirection_table_mask);
    if (!do_rss) {
        n->rss_data.indirections_len = 0;
    }
    if (n->rss_data.indirections_len >= VIRTIO_NET_RSS_MAX_TABLE_LEN) {
        err_msg = "Too large indirection table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    n->rss_data.indirections_len++;
    if (!is_power_of_2(n->rss_data.indirections_len)) {
        err_msg = "Invalid size of indirection table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    n->rss_data.default_queue = do_rss ?
        virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
    if (n->rss_data.default_queue >= n->max_queue_pairs) {
        err_msg = "Invalid default queue";
        err_value = n->rss_data.default_queue;
        goto error;
    }
    offset += size_get;
    size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
    g_free(n->rss_data.indirections_table);
    n->rss_data.indirections_table = g_malloc(size_get);
    if (!n->rss_data.indirections_table) {
        err_msg = "Can't allocate indirections table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    s = iov_to_buf(iov, iov_cnt, offset,
                   n->rss_data.indirections_table, size_get);
    if (s != size_get) {
        err_msg = "Short indirection table buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    for (i = 0; i < n->rss_data.indirections_len; ++i) {
        uint16_t val = n->rss_data.indirections_table[i];
        n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
    }
    offset += size_get;
    size_get = sizeof(temp);
    s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
    if (s != size_get) {
        err_msg = "Can't get queue_pairs";
        err_value = (uint32_t)s;
        goto error;
    }
    queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs;
    if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) {
        err_msg = "Invalid number of queue_pairs";
        err_value = queue_pairs;
        goto error;
    }
    if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
        err_msg = "Invalid key size";
        err_value = temp.b;
        goto error;
    }
    if (!temp.b && n->rss_data.runtime_hash_types) {
        err_msg = "No key provided";
        err_value = 0;
        goto error;
    }
    if (!temp.b && !n->rss_data.runtime_hash_types) {
        virtio_net_disable_rss(n);
        return queue_pairs;
    }
    offset += size_get;
    size_get = temp.b;
    s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
    if (s != size_get) {
        err_msg = "Can't get key buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    n->rss_data.enabled = true;
    virtio_net_commit_rss_config(n);
    return queue_pairs;
error:
    trace_virtio_net_rss_error(n, err_msg, err_value);
    virtio_net_disable_rss(n);
    return 0;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t queue_pairs;
    NetClientState *nc = qemu_get_queue(n->nic);

    virtio_net_disable_rss(n);
    if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
        queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false);
        return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
    }
    if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
        queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true);
    } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        struct virtio_net_ctrl_mq mq;
        size_t s;
        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
        if (s != sizeof(mq)) {
            return VIRTIO_NET_ERR;
        }
        queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    } else {
        return VIRTIO_NET_ERR;
    }

    if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queue_pairs > n->max_queue_pairs ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queue_pairs = queue_pairs;
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        /*
         * Avoid updating the backend for a vdpa device: We're only interested
         * in updating the device model queues.
         */
        return VIRTIO_NET_OK;
    }
    /*
     * Stop the backend before changing the number of queue_pairs to avoid
     * handling a disabled queue.
     */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queue_pairs(n);

    return VIRTIO_NET_OK;
}

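/* Dispatch one control-virtqueue command and write back its status byte. */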
size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
                                  const struct iovec *in_sg, unsigned in_num,
                                  const struct iovec *out_sg,
                                  unsigned out_num)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    size_t s;
    struct iovec *iov, *iov2;

    if (iov_size(in_sg, in_num) < sizeof(status) ||
        iov_size(out_sg, out_num) < sizeof(ctrl)) {
        virtio_error(vdev, "virtio-net ctrl missing headers");
        return 0;
    }

    iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
    s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
    iov_discard_front(&iov, &out_num, sizeof(ctrl));
    if (s != sizeof(ctrl)) {
        status = VIRTIO_NET_ERR;
    } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
        status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
        status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
        status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
        status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
        status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
        status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
    }

    s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
    assert(s == sizeof(status));

    g_free(iov2);
    return sizeof(status);
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    for (;;) {
        size_t written;
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
                                             elem->out_sg, elem->out_num);
        if (written > 0) {
            virtqueue_push(vq, elem, written);
            virtio_notify(vdev, vq);
            g_free(elem);
        } else {
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static bool virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return false;
    }

    if (nc->queue_index >= n->curr_queue_pairs) {
        return false;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return false;
    }

    return true;
}

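/*
 * Check whether the RX virtqueue can take a packet of 'bufsize' bytes.
 * Returns 1 (with guest notifications disabled) when enough buffer space
 * is available, 0 otherwise.
 */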
1587 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1588 {
1589     int opaque;
1590     unsigned int in_bytes;
1591     VirtIONet *n = q->n;
1592 
1593     while (virtio_queue_empty(q->rx_vq) || n->mergeable_rx_bufs) {
1594         opaque = virtqueue_get_avail_bytes(q->rx_vq, &in_bytes, NULL,
1595                                            bufsize, 0);
1596         /* Buffer is enough, disable notifiaction */
1597         if (bufsize <= in_bytes) {
1598             break;
1599         }
1600 
1601         if (virtio_queue_enable_notification_and_check(q->rx_vq, opaque)) {
1602             /* Guest has added some buffers, try again */
1603             continue;
1604         } else {
1605             return 0;
1606         }
1607     }
1608 
1609     virtio_queue_set_notification(q->rx_vq, 0);
1610 
1611     return 1;
1612 }
1613 
1614 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1615 {
1616     virtio_tswap16s(vdev, &hdr->hdr_len);
1617     virtio_tswap16s(vdev, &hdr->gso_size);
1618     virtio_tswap16s(vdev, &hdr->csum_start);
1619     virtio_tswap16s(vdev, &hdr->csum_offset);
1620 }
1621 
1622 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1623  * it never finds out that the packets don't have valid checksums.  This
1624  * causes dhclient to get upset.  Fedora's carried a patch for ages to
1625  * fix this with Xen but it hasn't appeared in an upstream release of
1626  * dhclient yet.
1627  *
1628  * To avoid breaking existing guests, we catch udp packets and add
1629  * checksums.  This is terrible but it's better than hacking the guest
1630  * kernels.
1631  *
1632  * N.B. if we introduce a zero-copy API, this operation is no longer free so
1633  * we should provide a mechanism to disable it to avoid polluting the host
1634  * cache.
1635  */
1636 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1637                                         uint8_t *buf, size_t size)
1638 {
1639     size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
1640                        sizeof(struct udp_header);
1641 
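    /*
     * The offsets below assume an untagged Ethernet frame carrying an
     * IPv4 header without options: bytes 12-13 hold the ethertype, byte
     * 23 (14 + 9) the IP protocol, and bytes 34-35 (14 + 20) the UDP
     * source port.  buf points just past the vnet header here.
     */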
1642     if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1643         (size >= csum_size && size < 1500) && /* normal sized MTU */
1644         (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1645         (buf[23] == 17) && /* ip.protocol == UDP */
1646         (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1647         net_checksum_calculate(buf, size, CSUM_UDP);
1648         hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1649     }
1650 }
1651 
1652 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1653                            const void *buf, size_t size)
1654 {
1655     if (n->has_vnet_hdr) {
1656         /* FIXME this cast is evil */
1657         void *wbuf = (void *)buf;
1658         work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1659                                     size - n->host_hdr_len);
1660 
1661         if (n->needs_vnet_hdr_swap) {
1662             virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1663         }
1664         iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1665     } else {
1666         struct virtio_net_hdr hdr = {
1667             .flags = 0,
1668             .gso_type = VIRTIO_NET_HDR_GSO_NONE
1669         };
1670         iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1671     }
1672 }
1673 
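/*
 * Apply the guest-programmed RX filter (promiscuous mode, VLAN table,
 * unicast/multicast/broadcast policy and the MAC table).  Returns 1 to
 * accept the packet and 0 to silently drop it.
 */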
1674 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1675 {
1676     static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1677     static const uint8_t vlan[] = {0x81, 0x00};
1678     uint8_t *ptr = (uint8_t *)buf;
1679     int i;
1680 
1681     if (n->promisc)
1682         return 1;
1683 
1684     ptr += n->host_hdr_len;
1685 
1686     if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1687         int vid = lduw_be_p(ptr + 14) & 0xfff;
1688         if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1689             return 0;
1690     }
1691 
1692     if (ptr[0] & 1) { /* multicast */
1693         if (!memcmp(ptr, bcast, sizeof(bcast))) {
1694             return !n->nobcast;
1695         } else if (n->nomulti) {
1696             return 0;
1697         } else if (n->allmulti || n->mac_table.multi_overflow) {
1698             return 1;
1699         }
1700 
1701         for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1702             if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1703                 return 1;
1704             }
1705         }
1706     } else { /* unicast */
1707         if (n->nouni) {
1708             return 0;
1709         } else if (n->alluni || n->mac_table.uni_overflow) {
1710             return 1;
1711         } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1712             return 1;
1713         }
1714 
1715         for (i = 0; i < n->mac_table.first_multi; i++) {
1716             if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1717                 return 1;
1718             }
1719         }
1720     }
1721 
1722     return 0;
1723 }
1724 
1725 static uint8_t virtio_net_get_hash_type(bool hasip4,
1726                                         bool hasip6,
1727                                         EthL4HdrProto l4hdr_proto,
1728                                         uint32_t types)
1729 {
1730     if (hasip4) {
1731         switch (l4hdr_proto) {
1732         case ETH_L4_HDR_PROTO_TCP:
1733             if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
1734                 return NetPktRssIpV4Tcp;
1735             }
1736             break;
1737 
1738         case ETH_L4_HDR_PROTO_UDP:
1739             if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
1740                 return NetPktRssIpV4Udp;
1741             }
1742             break;
1743 
1744         default:
1745             break;
1746         }
1747 
1748         if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
1749             return NetPktRssIpV4;
1750         }
1751     } else if (hasip6) {
1752         switch (l4hdr_proto) {
1753         case ETH_L4_HDR_PROTO_TCP:
1754             if (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) {
1755                 return NetPktRssIpV6TcpEx;
1756             }
1757             if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
1758                 return NetPktRssIpV6Tcp;
1759             }
1760             break;
1761 
1762         case ETH_L4_HDR_PROTO_UDP:
1763             if (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) {
1764                 return NetPktRssIpV6UdpEx;
1765             }
1766             if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
1767                 return NetPktRssIpV6Udp;
1768             }
1769             break;
1770 
1771         default:
1772             break;
1773         }
1774 
1775         if (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) {
1776             return NetPktRssIpV6Ex;
1777         }
1778         if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
1779             return NetPktRssIpV6;
1780         }
1781     }
1782     return 0xff;
1783 }
1784 
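/*
 * Compute the software RSS hash for a packet and, if requested, populate
 * the hash fields of the vnet header.  Returns the queue index the packet
 * should be redirected to, or -1 to leave it on the current queue.
 */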
1785 static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
1786                                   size_t size,
1787                                   struct virtio_net_hdr_v1_hash *hdr)
1788 {
1789     VirtIONet *n = qemu_get_nic_opaque(nc);
1790     unsigned int index = nc->queue_index, new_index = index;
1791     struct NetRxPkt *pkt = n->rx_pkt;
1792     uint8_t net_hash_type;
1793     uint32_t hash;
1794     bool hasip4, hasip6;
1795     EthL4HdrProto l4hdr_proto;
1796     static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
1797         VIRTIO_NET_HASH_REPORT_IPv4,
1798         VIRTIO_NET_HASH_REPORT_TCPv4,
1799         VIRTIO_NET_HASH_REPORT_TCPv6,
1800         VIRTIO_NET_HASH_REPORT_IPv6,
1801         VIRTIO_NET_HASH_REPORT_IPv6_EX,
1802         VIRTIO_NET_HASH_REPORT_TCPv6_EX,
1803         VIRTIO_NET_HASH_REPORT_UDPv4,
1804         VIRTIO_NET_HASH_REPORT_UDPv6,
1805         VIRTIO_NET_HASH_REPORT_UDPv6_EX
1806     };
1807     struct iovec iov = {
1808         .iov_base = (void *)buf,
1809         .iov_len = size
1810     };
1811 
1812     net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len);
1813     net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
1814     net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto,
1815                                              n->rss_data.runtime_hash_types);
1816     if (net_hash_type > NetPktRssIpV6UdpEx) {
1817         if (n->rss_data.populate_hash) {
1818             hdr->hash_value = VIRTIO_NET_HASH_REPORT_NONE;
1819             hdr->hash_report = 0;
1820         }
1821         return n->rss_data.redirect ? n->rss_data.default_queue : -1;
1822     }
1823 
1824     hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
1825 
1826     if (n->rss_data.populate_hash) {
1827         hdr->hash_value = hash;
1828         hdr->hash_report = reports[net_hash_type];
1829     }
1830 
1831     if (n->rss_data.redirect) {
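        /*
         * The indirection table length is required to be a power of two
         * when the guest configures RSS, so the mask below acts as a
         * cheap modulo.
         */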
1832         new_index = hash & (n->rss_data.indirections_len - 1);
1833         new_index = n->rss_data.indirections_table[new_index];
1834     }
1835 
1836     return (index == new_index) ? -1 : new_index;
1837 }
1838 
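/*
 * Deliver one packet to the guest: apply software RSS redirection, check
 * that the queue has room, run the RX filter, then copy the vnet header
 * and payload into one or more guest buffers (several when mergeable RX
 * buffers are negotiated), completing the used ring entries in order.
 */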
1839 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1840                                       size_t size)
1841 {
1842     VirtIONet *n = qemu_get_nic_opaque(nc);
1843     VirtIONetQueue *q;
1844     VirtIODevice *vdev = VIRTIO_DEVICE(n);
1845     QEMU_UNINITIALIZED VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
1846     QEMU_UNINITIALIZED size_t lens[VIRTQUEUE_MAX_SIZE];
1847     QEMU_UNINITIALIZED struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1848     struct virtio_net_hdr_v1_hash extra_hdr;
1849     unsigned mhdr_cnt = 0;
1850     size_t offset, i, guest_offset, j;
1851     ssize_t err;
1852 
1853     memset(&extra_hdr, 0, sizeof(extra_hdr));
1854 
1855     if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
1856         int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
1857         if (index >= 0) {
1858             nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
1859         }
1860     }
1861 
1862     if (!virtio_net_can_receive(nc)) {
1863         return -1;
1864     }
1865 
1866     q = virtio_net_get_subqueue(nc);
1867 
1868     /* hdr_len refers to the header we supply to the guest */
1869     if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1870         return 0;
1871     }
1872 
1873     if (!receive_filter(n, buf, size))
1874         return size;
1875 
1876     offset = i = 0;
1877 
1878     while (offset < size) {
1879         VirtQueueElement *elem;
1880         int len, total;
1881         const struct iovec *sg;
1882 
1883         total = 0;
1884 
1885         if (i == VIRTQUEUE_MAX_SIZE) {
1886             virtio_error(vdev, "virtio-net unexpected long buffer chain");
1887             err = size;
1888             goto err;
1889         }
1890 
1891         elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1892         if (!elem) {
1893             if (i) {
1894                 virtio_error(vdev, "virtio-net unexpected empty queue: "
1895                              "i %zd mergeable %d offset %zd, size %zd, "
1896                              "guest hdr len %zd, host hdr len %zd "
1897                              "guest features 0x%" PRIx64,
1898                              i, n->mergeable_rx_bufs, offset, size,
1899                              n->guest_hdr_len, n->host_hdr_len,
1900                              vdev->guest_features);
1901             }
1902             err = -1;
1903             goto err;
1904         }
1905 
1906         if (elem->in_num < 1) {
1907             virtio_error(vdev,
1908                          "virtio-net receive queue contains no in buffers");
1909             virtqueue_detach_element(q->rx_vq, elem, 0);
1910             g_free(elem);
1911             err = -1;
1912             goto err;
1913         }
1914 
1915         sg = elem->in_sg;
1916         if (i == 0) {
1917             assert(offset == 0);
1918             if (n->mergeable_rx_bufs) {
1919                 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1920                                     sg, elem->in_num,
1921                                     offsetof(typeof(extra_hdr), hdr.num_buffers),
1922                                     sizeof(extra_hdr.hdr.num_buffers));
1923             } else {
1924                 extra_hdr.hdr.num_buffers = cpu_to_le16(1);
1925             }
1926 
1927             receive_header(n, sg, elem->in_num, buf, size);
1928             if (n->rss_data.populate_hash) {
1929                 offset = offsetof(typeof(extra_hdr), hash_value);
1930                 iov_from_buf(sg, elem->in_num, offset,
1931                              (char *)&extra_hdr + offset,
1932                              sizeof(extra_hdr.hash_value) +
1933                              sizeof(extra_hdr.hash_report));
1934             }
1935             offset = n->host_hdr_len;
1936             total += n->guest_hdr_len;
1937             guest_offset = n->guest_hdr_len;
1938         } else {
1939             guest_offset = 0;
1940         }
1941 
1942         /* copy in packet.  ugh */
1943         len = iov_from_buf(sg, elem->in_num, guest_offset,
1944                            buf + offset, size - offset);
1945         total += len;
1946         offset += len;
1947         /* If buffers can't be merged, at this point we
1948          * must have consumed the complete packet.
1949          * Otherwise, drop it. */
1950         if (!n->mergeable_rx_bufs && offset < size) {
1951             virtqueue_unpop(q->rx_vq, elem, total);
1952             g_free(elem);
1953             err = size;
1954             goto err;
1955         }
1956 
1957         elems[i] = elem;
1958         lens[i] = total;
1959         i++;
1960     }
1961 
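    /*
     * With mergeable RX buffers the final buffer count is only known once
     * the copy loop has finished, so patch num_buffers back into the first
     * buffer's header through the iov saved earlier.
     */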
1962     if (mhdr_cnt) {
1963         virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
1964         iov_from_buf(mhdr_sg, mhdr_cnt,
1965                      0,
1966                      &extra_hdr.hdr.num_buffers,
1967                      sizeof extra_hdr.hdr.num_buffers);
1968     }
1969 
1970     for (j = 0; j < i; j++) {
1971         /* signal other side */
1972         virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
1973         g_free(elems[j]);
1974     }
1975 
1976     virtqueue_flush(q->rx_vq, i);
1977     virtio_notify(vdev, q->rx_vq);
1978 
1979     return size;
1980 
1981 err:
1982     for (j = 0; j < i; j++) {
1983         virtqueue_detach_element(q->rx_vq, elems[j], lens[j]);
1984         g_free(elems[j]);
1985     }
1986 
1987     return err;
1988 }
1989 
1990 static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
1991                                   size_t size)
1992 {
1993     RCU_READ_LOCK_GUARD();
1994 
1995     return virtio_net_receive_rcu(nc, buf, size);
1996 }
1997 
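/*
 * Receive segment coalescing (RSC): TCP segments belonging to the same
 * flow are cached per chain and, when they arrive in order, merged into
 * one larger packet before delivery, roughly mimicking what RSC-capable
 * hardware offers to Windows guests.  Cached segments are flushed by the
 * drain timer or as soon as a segment arrives that cannot be coalesced.
 */
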
1998 /*
1999  * Accessors to read and write the IP packet data length field. This
2000  * is a potentially unaligned network-byte-order 16 bit unsigned integer
2001  * pointed to by unit->ip_plen.
2002  */
2003 static uint16_t read_unit_ip_len(VirtioNetRscUnit *unit)
2004 {
2005     return lduw_be_p(unit->ip_plen);
2006 }
2007 
2008 static void write_unit_ip_len(VirtioNetRscUnit *unit, uint16_t l)
2009 {
2010     stw_be_p(unit->ip_plen, l);
2011 }
2012 
2013 static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
2014                                          const uint8_t *buf,
2015                                          VirtioNetRscUnit *unit)
2016 {
2017     uint16_t ip_hdrlen;
2018     struct ip_header *ip;
2019 
2020     ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
2021                               + sizeof(struct eth_header));
2022     unit->ip = (void *)ip;
2023     ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
2024     unit->ip_plen = &ip->ip_len;
2025     unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
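    /*
     * The TCP data offset lives in the top 4 bits of th_offset_flags and
     * counts 32-bit words: >> 12 extracts it and << 2 scales to bytes,
     * hence the combined >> 10.
     */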
2026     unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
2027     unit->payload = read_unit_ip_len(unit) - ip_hdrlen - unit->tcp_hdrlen;
2028 }
2029 
2030 static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
2031                                          const uint8_t *buf,
2032                                          VirtioNetRscUnit *unit)
2033 {
2034     struct ip6_header *ip6;
2035 
2036     ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
2037                                  + sizeof(struct eth_header));
2038     unit->ip = ip6;
2039     unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2040     unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
2041                                         + sizeof(struct ip6_header));
2042     unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
2043 
2044     /* Unlike the IPv4 total length, the IPv6 payload length excludes
2045        the IP header itself */
2046     unit->payload = read_unit_ip_len(unit) - unit->tcp_hdrlen;
2047 }
2048 
2049 static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
2050                                        VirtioNetRscSeg *seg)
2051 {
2052     int ret;
2053     struct virtio_net_hdr_v1 *h;
2054 
2055     h = (struct virtio_net_hdr_v1 *)seg->buf;
2056     h->flags = 0;
2057     h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
2058 
2059     if (seg->is_coalesced) {
2060         h->rsc.segments = seg->packets;
2061         h->rsc.dup_acks = seg->dup_ack;
2062         h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
2063         if (chain->proto == ETH_P_IP) {
2064             h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2065         } else {
2066             h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2067         }
2068     }
2069 
2070     ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
2071     QTAILQ_REMOVE(&chain->buffers, seg, next);
2072     g_free(seg->buf);
2073     g_free(seg);
2074 
2075     return ret;
2076 }
2077 
2078 static void virtio_net_rsc_purge(void *opq)
2079 {
2080     VirtioNetRscSeg *seg, *rn;
2081     VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
2082 
2083     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
2084         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2085             chain->stat.purge_failed++;
2086             continue;
2087         }
2088     }
2089 
2090     chain->stat.timer++;
2091     if (!QTAILQ_EMPTY(&chain->buffers)) {
2092         timer_mod(chain->drain_timer,
2093               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout);
2094     }
2095 }
2096 
2097 static void virtio_net_rsc_cleanup(VirtIONet *n)
2098 {
2099     VirtioNetRscChain *chain, *rn_chain;
2100     VirtioNetRscSeg *seg, *rn_seg;
2101 
2102     QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
2103         QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
2104             QTAILQ_REMOVE(&chain->buffers, seg, next);
2105             g_free(seg->buf);
2106             g_free(seg);
2107         }
2108 
2109         timer_free(chain->drain_timer);
2110         QTAILQ_REMOVE(&n->rsc_chains, chain, next);
2111         g_free(chain);
2112     }
2113 }
2114 
2115 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
2116                                      NetClientState *nc,
2117                                      const uint8_t *buf, size_t size)
2118 {
2119     uint16_t hdr_len;
2120     VirtioNetRscSeg *seg;
2121 
2122     hdr_len = chain->n->guest_hdr_len;
2123     seg = g_new(VirtioNetRscSeg, 1);
2124     seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
2125         + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
2126     memcpy(seg->buf, buf, size);
2127     seg->size = size;
2128     seg->packets = 1;
2129     seg->dup_ack = 0;
2130     seg->is_coalesced = 0;
2131     seg->nc = nc;
2132 
2133     QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
2134     chain->stat.cache++;
2135 
2136     switch (chain->proto) {
2137     case ETH_P_IP:
2138         virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
2139         break;
2140     case ETH_P_IPV6:
2141         virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
2142         break;
2143     default:
2144         g_assert_not_reached();
2145     }
2146 }
2147 
2148 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
2149                                          VirtioNetRscSeg *seg,
2150                                          const uint8_t *buf,
2151                                          struct tcp_header *n_tcp,
2152                                          struct tcp_header *o_tcp)
2153 {
2154     uint32_t nack, oack;
2155     uint16_t nwin, owin;
2156 
2157     nack = htonl(n_tcp->th_ack);
2158     nwin = htons(n_tcp->th_win);
2159     oack = htonl(o_tcp->th_ack);
2160     owin = htons(o_tcp->th_win);
2161 
2162     if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
2163         chain->stat.ack_out_of_win++;
2164         return RSC_FINAL;
2165     } else if (nack == oack) {
2166         /* duplicated ack or window probe */
2167         if (nwin == owin) {
2168             /* duplicated ack, counted for the WHQL test (up to 1) */
2169             chain->stat.dup_ack++;
2170             return RSC_FINAL;
2171         } else {
2172             /* Coalesce window update */
2173             o_tcp->th_win = n_tcp->th_win;
2174             chain->stat.win_update++;
2175             return RSC_COALESCE;
2176         }
2177     } else {
2178         /* pure ack, finalize */
2179         chain->stat.pure_ack++;
2180         return RSC_FINAL;
2181     }
2182 }
2183 
2184 static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
2185                                             VirtioNetRscSeg *seg,
2186                                             const uint8_t *buf,
2187                                             VirtioNetRscUnit *n_unit)
2188 {
2189     void *data;
2190     uint16_t o_ip_len;
2191     uint32_t nseq, oseq;
2192     VirtioNetRscUnit *o_unit;
2193 
2194     o_unit = &seg->unit;
2195     o_ip_len = read_unit_ip_len(o_unit);
2196     nseq = htonl(n_unit->tcp->th_seq);
2197     oseq = htonl(o_unit->tcp->th_seq);
2198 
2199     /* out of order or retransmitted: the unsigned compare also catches nseq < oseq */
2200     if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
2201         chain->stat.data_out_of_win++;
2202         return RSC_FINAL;
2203     }
2204 
2205     data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
2206     if (nseq == oseq) {
2207         if ((o_unit->payload == 0) && n_unit->payload) {
2208             /* From no payload to payload: the normal case, not a dup ack */
2209             chain->stat.data_after_pure_ack++;
2210             goto coalesce;
2211         } else {
2212             return virtio_net_rsc_handle_ack(chain, seg, buf,
2213                                              n_unit->tcp, o_unit->tcp);
2214         }
2215     } else if ((nseq - oseq) != o_unit->payload) {
2216         /* Not a consistent packet, out of order */
2217         chain->stat.data_out_of_order++;
2218         return RSC_FINAL;
2219     } else {
2220 coalesce:
2221         if ((o_ip_len + n_unit->payload) > chain->max_payload) {
2222             chain->stat.over_size++;
2223             return RSC_FINAL;
2224         }
2225 
2226         /* The data is in order.  The payload length field differs between
2227            v4 and v6, so update it through the length accessor */
2228         o_unit->payload += n_unit->payload; /* update new data len */
2229 
2230         /* update field in ip header */
2231         write_unit_ip_len(o_unit, o_ip_len + n_unit->payload);
2232 
2233         /* Take the newer 'PUSH' state: the WHQL test guide says 'PUSH' can
2234            be coalesced for Windows guests, while this may change behavior
2235            for Linux guests (only if they use the RSC feature). */
2236         o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
2237 
2238         o_unit->tcp->th_ack = n_unit->tcp->th_ack;
2239         o_unit->tcp->th_win = n_unit->tcp->th_win;
2240 
2241         memmove(seg->buf + seg->size, data, n_unit->payload);
2242         seg->size += n_unit->payload;
2243         seg->packets++;
2244         chain->stat.coalesced++;
2245         return RSC_COALESCE;
2246     }
2247 }
2248 
2249 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
2250                                         VirtioNetRscSeg *seg,
2251                                         const uint8_t *buf, size_t size,
2252                                         VirtioNetRscUnit *unit)
2253 {
2254     struct ip_header *ip1, *ip2;
2255 
2256     ip1 = (struct ip_header *)(unit->ip);
2257     ip2 = (struct ip_header *)(seg->unit.ip);
2258     if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
2259         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2260         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2261         chain->stat.no_match++;
2262         return RSC_NO_MATCH;
2263     }
2264 
2265     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2266 }
2267 
2268 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
2269                                         VirtioNetRscSeg *seg,
2270                                         const uint8_t *buf, size_t size,
2271                                         VirtioNetRscUnit *unit)
2272 {
2273     struct ip6_header *ip1, *ip2;
2274 
2275     ip1 = (struct ip6_header *)(unit->ip);
2276     ip2 = (struct ip6_header *)(seg->unit.ip);
2277     if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
2278         || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
2279         || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2280         || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2281             chain->stat.no_match++;
2282             return RSC_NO_MATCH;
2283     }
2284 
2285     return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2286 }
2287 
2288 /* Packets with 'SYN' set bypass coalescing; packets with other control
2289  * flags are only sent after the chain is drained, to prevent reordering */
2290 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
2291                                          struct tcp_header *tcp)
2292 {
2293     uint16_t tcp_hdr;
2294     uint16_t tcp_flag;
2295 
2296     tcp_flag = htons(tcp->th_offset_flags);
2297     tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
2298     tcp_flag &= VIRTIO_NET_TCP_FLAG;
2299     if (tcp_flag & TH_SYN) {
2300         chain->stat.tcp_syn++;
2301         return RSC_BYPASS;
2302     }
2303 
2304     if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
2305         chain->stat.tcp_ctrl_drain++;
2306         return RSC_FINAL;
2307     }
2308 
2309     if (tcp_hdr > sizeof(struct tcp_header)) {
2310         chain->stat.tcp_all_opt++;
2311         return RSC_FINAL;
2312     }
2313 
2314     return RSC_CANDIDATE;
2315 }
2316 
2317 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
2318                                          NetClientState *nc,
2319                                          const uint8_t *buf, size_t size,
2320                                          VirtioNetRscUnit *unit)
2321 {
2322     int ret;
2323     VirtioNetRscSeg *seg, *nseg;
2324 
2325     if (QTAILQ_EMPTY(&chain->buffers)) {
2326         chain->stat.empty_cache++;
2327         virtio_net_rsc_cache_buf(chain, nc, buf, size);
2328         timer_mod(chain->drain_timer,
2329               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout);
2330         return size;
2331     }
2332 
2333     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2334         if (chain->proto == ETH_P_IP) {
2335             ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
2336         } else {
2337             ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
2338         }
2339 
2340         if (ret == RSC_FINAL) {
2341             if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2342                 /* Send failed */
2343                 chain->stat.final_failed++;
2344                 return 0;
2345             }
2346 
2347             /* Send current packet */
2348             return virtio_net_do_receive(nc, buf, size);
2349         } else if (ret == RSC_NO_MATCH) {
2350             continue;
2351         } else {
2352             /* Coalesced: mark the segment so the drain path fills in the RSC info */
2353             seg->is_coalesced = 1;
2354             return size;
2355         }
2356     }
2357 
2358     chain->stat.no_match_cache++;
2359     virtio_net_rsc_cache_buf(chain, nc, buf, size);
2360     return size;
2361 }
2362 
2363 /* Drain a connection's cached data, to avoid delivering segments out of order */
2364 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
2365                                         NetClientState *nc,
2366                                         const uint8_t *buf, size_t size,
2367                                         uint16_t ip_start, uint16_t ip_size,
2368                                         uint16_t tcp_port)
2369 {
2370     VirtioNetRscSeg *seg, *nseg;
2371     uint32_t ppair1, ppair2;
2372 
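    /*
     * tcp_port is the offset of the TCP header, so a single 32-bit load
     * covers both the source and destination ports of the flow.
     */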
2373     ppair1 = *(uint32_t *)(buf + tcp_port);
2374     QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2375         ppair2 = *(uint32_t *)(seg->buf + tcp_port);
2376         if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
2377             || (ppair1 != ppair2)) {
2378             continue;
2379         }
2380         if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2381             chain->stat.drain_failed++;
2382         }
2383 
2384         break;
2385     }
2386 
2387     return virtio_net_do_receive(nc, buf, size);
2388 }
2389 
2390 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
2391                                             struct ip_header *ip,
2392                                             const uint8_t *buf, size_t size)
2393 {
2394     uint16_t ip_len;
2395 
2396     /* Not an ipv4 packet */
2397     if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
2398         chain->stat.ip_option++;
2399         return RSC_BYPASS;
2400     }
2401 
2402     /* Don't handle packets with IP options */
2403     if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
2404         chain->stat.ip_option++;
2405         return RSC_BYPASS;
2406     }
2407 
2408     if (ip->ip_p != IPPROTO_TCP) {
2409         chain->stat.bypass_not_tcp++;
2410         return RSC_BYPASS;
2411     }
2412 
2413     /* Don't handle packets that may be fragmented (DF not set) */
2414     if (!(htons(ip->ip_off) & IP_DF)) {
2415         chain->stat.ip_frag++;
2416         return RSC_BYPASS;
2417     }
2418 
2419     /* Don't handle packets with the ECN flag set */
2420     if (IPTOS_ECN(ip->ip_tos)) {
2421         chain->stat.ip_ecn++;
2422         return RSC_BYPASS;
2423     }
2424 
2425     ip_len = htons(ip->ip_len);
2426     if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
2427         || ip_len > (size - chain->n->guest_hdr_len -
2428                      sizeof(struct eth_header))) {
2429         chain->stat.ip_hacked++;
2430         return RSC_BYPASS;
2431     }
2432 
2433     return RSC_CANDIDATE;
2434 }
2435 
2436 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
2437                                       NetClientState *nc,
2438                                       const uint8_t *buf, size_t size)
2439 {
2440     int32_t ret;
2441     uint16_t hdr_len;
2442     VirtioNetRscUnit unit;
2443 
2444     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2445 
2446     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
2447         + sizeof(struct tcp_header))) {
2448         chain->stat.bypass_not_tcp++;
2449         return virtio_net_do_receive(nc, buf, size);
2450     }
2451 
2452     virtio_net_rsc_extract_unit4(chain, buf, &unit);
2453     if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
2454         != RSC_CANDIDATE) {
2455         return virtio_net_do_receive(nc, buf, size);
2456     }
2457 
2458     ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2459     if (ret == RSC_BYPASS) {
2460         return virtio_net_do_receive(nc, buf, size);
2461     } else if (ret == RSC_FINAL) {
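        /*
         * The IPv4 source address lives 12 bytes into the IP header and
         * 8 bytes cover saddr + daddr; the TCP ports follow the 20-byte
         * option-less header that sanity_check4 has already enforced.
         */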
2462         return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2463                 ((hdr_len + sizeof(struct eth_header)) + 12),
2464                 VIRTIO_NET_IP4_ADDR_SIZE,
2465                 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
2466     }
2467 
2468     return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
2469 }
2470 
2471 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
2472                                             struct ip6_header *ip6,
2473                                             const uint8_t *buf, size_t size)
2474 {
2475     uint16_t ip_len;
2476 
2477     if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
2478         != IP_HEADER_VERSION_6) {
2479         return RSC_BYPASS;
2480     }
2481 
2482     /* Both options and protocol are checked here: only plain TCP passes */
2483     if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
2484         chain->stat.bypass_not_tcp++;
2485         return RSC_BYPASS;
2486     }
2487 
2488     ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2489     if (ip_len < sizeof(struct tcp_header) ||
2490         ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
2491                   - sizeof(struct ip6_header))) {
2492         chain->stat.ip_hacked++;
2493         return RSC_BYPASS;
2494     }
2495 
2496     /* Don't handle packets with the ECN flag set */
2497     if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
2498         chain->stat.ip_ecn++;
2499         return RSC_BYPASS;
2500     }
2501 
2502     return RSC_CANDIDATE;
2503 }
2504 
2505 static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
2506                                       const uint8_t *buf, size_t size)
2507 {
2508     int32_t ret;
2509     uint16_t hdr_len;
2510     VirtioNetRscChain *chain;
2511     VirtioNetRscUnit unit;
2512 
2513     chain = opq;
2514     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2515 
2516     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
2517         + sizeof(struct tcp_header))) {
2518         return virtio_net_do_receive(nc, buf, size);
2519     }
2520 
2521     virtio_net_rsc_extract_unit6(chain, buf, &unit);
2522     if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
2523                                                  unit.ip, buf, size)) {
2524         return virtio_net_do_receive(nc, buf, size);
2525     }
2526 
2527     ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2528     if (ret == RSC_BYPASS) {
2529         return virtio_net_do_receive(nc, buf, size);
2530     } else if (ret == RSC_FINAL) {
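        /*
         * ip6_src starts 8 bytes into the IPv6 header and 32 bytes cover
         * both addresses; the TCP ports follow the fixed 40-byte header.
         */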
2531         return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2532                 ((hdr_len + sizeof(struct eth_header)) + 8),
2533                 VIRTIO_NET_IP6_ADDR_SIZE,
2534                 hdr_len + sizeof(struct eth_header)
2535                 + sizeof(struct ip6_header));
2536     }
2537 
2538     return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
2539 }
2540 
2541 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
2542                                                       NetClientState *nc,
2543                                                       uint16_t proto)
2544 {
2545     VirtioNetRscChain *chain;
2546 
2547     if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
2548         return NULL;
2549     }
2550 
2551     QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
2552         if (chain->proto == proto) {
2553             return chain;
2554         }
2555     }
2556 
2557     chain = g_malloc(sizeof(*chain));
2558     chain->n = n;
2559     chain->proto = proto;
2560     if (proto == (uint16_t)ETH_P_IP) {
2561         chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
2562         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2563     } else {
2564         chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
2565         chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2566     }
2567     chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2568                                       virtio_net_rsc_purge, chain);
2569     memset(&chain->stat, 0, sizeof(chain->stat));
2570 
2571     QTAILQ_INIT(&chain->buffers);
2572     QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
2573 
2574     return chain;
2575 }
2576 
2577 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
2578                                       const uint8_t *buf,
2579                                       size_t size)
2580 {
2581     uint16_t proto;
2582     VirtioNetRscChain *chain;
2583     struct eth_header *eth;
2584     VirtIONet *n;
2585 
2586     n = qemu_get_nic_opaque(nc);
2587     if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
2588         return virtio_net_do_receive(nc, buf, size);
2589     }
2590 
2591     eth = (struct eth_header *)(buf + n->guest_hdr_len);
2592     proto = htons(eth->h_proto);
2593 
2594     chain = virtio_net_rsc_lookup_chain(n, nc, proto);
2595     if (chain) {
2596         chain->stat.received++;
2597         if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
2598             return virtio_net_rsc_receive4(chain, nc, buf, size);
2599         } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
2600             return virtio_net_rsc_receive6(chain, nc, buf, size);
2601         }
2602     }
2603     return virtio_net_do_receive(nc, buf, size);
2604 }
2605 
2606 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
2607                                   size_t size)
2608 {
2609     VirtIONet *n = qemu_get_nic_opaque(nc);
2610     if ((n->rsc4_enabled || n->rsc6_enabled)) {
2611         return virtio_net_rsc_receive(nc, buf, size);
2612     } else {
2613         return virtio_net_do_receive(nc, buf, size);
2614     }
2615 }
2616 
2617 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
2618 
2619 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
2620 {
2621     VirtIONet *n = qemu_get_nic_opaque(nc);
2622     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
2623     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2624     int ret;
2625 
2626     virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
2627     virtio_notify(vdev, q->tx_vq);
2628 
2629     g_free(q->async_tx.elem);
2630     q->async_tx.elem = NULL;
2631 
2632     virtio_queue_set_notification(q->tx_vq, 1);
2633     ret = virtio_net_flush_tx(q);
2634     if (ret >= n->tx_burst) {
2635         /*
2636          * the flush has been stopped by tx_burst
2637          * we will not receive notification for the
2638          * remaining part, so re-schedule
2639          */
2640         virtio_queue_set_notification(q->tx_vq, 0);
2641         if (q->tx_bh) {
2642             replay_bh_schedule_event(q->tx_bh);
2643         } else {
2644             timer_mod(q->tx_timer,
2645                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2646         }
2647         q->tx_waiting = 1;
2648     }
2649 }
2650 
2651 /* TX */
2652 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
2653 {
2654     VirtIONet *n = q->n;
2655     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2656     VirtQueueElement *elem;
2657     int32_t num_packets = 0;
2658     int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
2659     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2660         return num_packets;
2661     }
2662 
2663     if (q->async_tx.elem) {
2664         virtio_queue_set_notification(q->tx_vq, 0);
2665         return num_packets;
2666     }
2667 
2668     for (;;) {
2669         ssize_t ret;
2670         unsigned int out_num;
2671         struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
2672         struct virtio_net_hdr vhdr;
2673 
2674         elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2675         if (!elem) {
2676             break;
2677         }
2678 
2679         out_num = elem->out_num;
2680         out_sg = elem->out_sg;
2681         if (out_num < 1) {
2682             virtio_error(vdev, "virtio-net header not in first element");
2683             goto detach;
2684         }
2685 
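        /*
         * If host and guest disagree on vnet header endianness, byteswap
         * a local copy of the header and splice it in front of the rest
         * of the guest iov before handing the packet to the backend.
         */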
2686         if (n->needs_vnet_hdr_swap) {
2687             if (iov_to_buf(out_sg, out_num, 0, &vhdr, sizeof(vhdr)) <
2688                 sizeof(vhdr)) {
2689                 virtio_error(vdev, "virtio-net header incorrect");
2690                 goto detach;
2691             }
2692             virtio_net_hdr_swap(vdev, &vhdr);
2693             sg2[0].iov_base = &vhdr;
2694             sg2[0].iov_len = sizeof(vhdr);
2695             out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, out_sg, out_num,
2696                                sizeof(vhdr), -1);
2697             if (out_num == VIRTQUEUE_MAX_SIZE) {
2698                 goto drop;
2699             }
2700             out_num += 1;
2701             out_sg = sg2;
2702         }
2703         /*
2704          * If host wants to see the guest header as is, we can
2705          * pass it on unchanged. Otherwise, copy just the parts
2706          * that host is interested in.
2707          */
2708         assert(n->host_hdr_len <= n->guest_hdr_len);
2709         if (n->host_hdr_len != n->guest_hdr_len) {
2710             if (iov_size(out_sg, out_num) < n->guest_hdr_len) {
2711                 virtio_error(vdev, "virtio-net header is invalid");
2712                 goto detach;
2713             }
2714             unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2715                                        out_sg, out_num,
2716                                        0, n->host_hdr_len);
2717             sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2718                              out_sg, out_num,
2719                              n->guest_hdr_len, -1);
2720             out_num = sg_num;
2721             out_sg = sg;
2722 
2723             if (out_num < 1) {
2724                 virtio_error(vdev, "virtio-net nothing to send");
2725                 goto detach;
2726             }
2727         }
2728 
2729         ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2730                                       out_sg, out_num, virtio_net_tx_complete);
2731         if (ret == 0) {
2732             virtio_queue_set_notification(q->tx_vq, 0);
2733             q->async_tx.elem = elem;
2734             return -EBUSY;
2735         }
2736 
2737 drop:
2738         virtqueue_push(q->tx_vq, elem, 0);
2739         virtio_notify(vdev, q->tx_vq);
2740         g_free(elem);
2741 
2742         if (++num_packets >= n->tx_burst) {
2743             break;
2744         }
2745     }
2746     return num_packets;
2747 
2748 detach:
2749     virtqueue_detach_element(q->tx_vq, elem, 0);
2750     g_free(elem);
2751     return -EINVAL;
2752 }
2753 
2754 static void virtio_net_tx_timer(void *opaque);
2755 
2756 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2757 {
2758     VirtIONet *n = VIRTIO_NET(vdev);
2759     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2760 
2761     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2762         virtio_net_drop_tx_queue_data(vdev, vq);
2763         return;
2764     }
2765 
2766     /* This happens when device was stopped but VCPU wasn't. */
2767     if (!vdev->vm_running) {
2768         q->tx_waiting = 1;
2769         return;
2770     }
2771 
2772     if (q->tx_waiting) {
2773         /* We already have queued packets, immediately flush */
2774         timer_del(q->tx_timer);
2775         virtio_net_tx_timer(q);
2776     } else {
2777         /* re-arm timer to flush it (and more) on next tick */
2778         timer_mod(q->tx_timer,
2779                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2780         q->tx_waiting = 1;
2781         virtio_queue_set_notification(vq, 0);
2782     }
2783 }
2784 
2785 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2786 {
2787     VirtIONet *n = VIRTIO_NET(vdev);
2788     VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2789 
2790     if (unlikely(n->vhost_started)) {
2791         return;
2792     }
2793 
2794     if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2795         virtio_net_drop_tx_queue_data(vdev, vq);
2796         return;
2797     }
2798 
2799     if (unlikely(q->tx_waiting)) {
2800         return;
2801     }
2802     q->tx_waiting = 1;
2803     /* This happens when device was stopped but VCPU wasn't. */
2804     if (!vdev->vm_running) {
2805         return;
2806     }
2807     virtio_queue_set_notification(vq, 0);
2808     replay_bh_schedule_event(q->tx_bh);
2809 }
2810 
2811 static void virtio_net_tx_timer(void *opaque)
2812 {
2813     VirtIONetQueue *q = opaque;
2814     VirtIONet *n = q->n;
2815     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2816     int ret;
2817 
2818     /* This happens when the device was stopped but the timer wasn't. */
2819     if (!vdev->vm_running) {
2820         /* Make sure tx waiting is set, so we'll run when restarted. */
2821         assert(q->tx_waiting);
2822         return;
2823     }
2824 
2825     q->tx_waiting = 0;
2826 
2827     /* Just in case the driver is not ready any more */
2828     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2829         return;
2830     }
2831 
2832     ret = virtio_net_flush_tx(q);
2833     if (ret == -EBUSY || ret == -EINVAL) {
2834         return;
2835     }
2836     /*
2837      * If we flush a full burst of packets, assume there are
2838      * more coming and immediately rearm
2839      */
2840     if (ret >= n->tx_burst) {
2841         q->tx_waiting = 1;
2842         timer_mod(q->tx_timer,
2843                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2844         return;
2845     }
2846     /*
2847      * If less than a full burst, re-enable notification and flush
2848      * anything that may have come in while we weren't looking.  If
2849      * we find something, assume the guest is still active and rearm
2850      */
2851     virtio_queue_set_notification(q->tx_vq, 1);
2852     ret = virtio_net_flush_tx(q);
2853     if (ret > 0) {
2854         virtio_queue_set_notification(q->tx_vq, 0);
2855         q->tx_waiting = 1;
2856         timer_mod(q->tx_timer,
2857                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2858     }
2859 }
2860 
2861 static void virtio_net_tx_bh(void *opaque)
2862 {
2863     VirtIONetQueue *q = opaque;
2864     VirtIONet *n = q->n;
2865     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2866     int32_t ret;
2867 
2868     /* This happens when device was stopped but BH wasn't. */
2869     if (!vdev->vm_running) {
2870         /* Make sure tx waiting is set, so we'll run when restarted. */
2871         assert(q->tx_waiting);
2872         return;
2873     }
2874 
2875     q->tx_waiting = 0;
2876 
2877     /* Just in case the driver is not ready any more */
2878     if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
2879         return;
2880     }
2881 
2882     ret = virtio_net_flush_tx(q);
2883     if (ret == -EBUSY || ret == -EINVAL) {
2884         /* notification re-enable handled by tx_complete, or device broken */
2885         return;
2886     }
2887 
2888     /* If we flush a full burst of packets, assume there are
2889      * more coming and immediately reschedule */
2890     if (ret >= n->tx_burst) {
2891         replay_bh_schedule_event(q->tx_bh);
2892         q->tx_waiting = 1;
2893         return;
2894     }
2895 
2896     /* If less than a full burst, re-enable notification and flush
2897      * anything that may have come in while we weren't looking.  If
2898      * we find something, assume the guest is still active and reschedule */
2899     virtio_queue_set_notification(q->tx_vq, 1);
2900     ret = virtio_net_flush_tx(q);
2901     if (ret == -EINVAL) {
2902         return;
2903     } else if (ret > 0) {
2904         virtio_queue_set_notification(q->tx_vq, 0);
2905         replay_bh_schedule_event(q->tx_bh);
2906         q->tx_waiting = 1;
2907     }
2908 }
2909 
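/*
 * Two TX mitigation schemes are supported: tx=timer batches transmissions
 * until a timeout fires, while the default tx=bh flushes from a bottom
 * half scheduled on the next main-loop iteration.
 */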
2910 static void virtio_net_add_queue(VirtIONet *n, int index)
2911 {
2912     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2913 
2914     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2915                                            virtio_net_handle_rx);
2916 
2917     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2918         n->vqs[index].tx_vq =
2919             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2920                              virtio_net_handle_tx_timer);
2921         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2922                                               virtio_net_tx_timer,
2923                                               &n->vqs[index]);
2924     } else {
2925         n->vqs[index].tx_vq =
2926             virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2927                              virtio_net_handle_tx_bh);
2928         n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index],
2929                                                   &DEVICE(vdev)->mem_reentrancy_guard);
2930     }
2931 
2932     n->vqs[index].tx_waiting = 0;
2933     n->vqs[index].n = n;
2934 }
2935 
2936 static void virtio_net_del_queue(VirtIONet *n, int index)
2937 {
2938     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2939     VirtIONetQueue *q = &n->vqs[index];
2940     NetClientState *nc = qemu_get_subqueue(n->nic, index);
2941 
2942     qemu_purge_queued_packets(nc);
2943 
2944     virtio_del_queue(vdev, index * 2);
2945     if (q->tx_timer) {
2946         timer_free(q->tx_timer);
2947         q->tx_timer = NULL;
2948     } else {
2949         qemu_bh_delete(q->tx_bh);
2950         q->tx_bh = NULL;
2951     }
2952     q->tx_waiting = 0;
2953     virtio_del_queue(vdev, index * 2 + 1);
2954 }
2955 
2956 static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs)
2957 {
2958     VirtIODevice *vdev = VIRTIO_DEVICE(n);
2959     int old_num_queues = virtio_get_num_queues(vdev);
2960     int new_num_queues = new_max_queue_pairs * 2 + 1;
2961     int i;
2962 
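    /*
     * Virtqueues are laid out as rx0, tx0, rx1, tx1, ..., ctrl, so a
     * device with N queue pairs exposes 2 * N + 1 queues and the loops
     * below step by two.
     */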
2963     assert(old_num_queues >= 3);
2964     assert(old_num_queues % 2 == 1);
2965 
2966     if (old_num_queues == new_num_queues) {
2967         return;
2968     }
2969 
2970     /*
2971      * We always need to remove and add ctrl vq if
2972      * old_num_queues != new_num_queues. Remove ctrl_vq first,
2973      * and then we only enter one of the following two loops.
2974      */
2975     virtio_del_queue(vdev, old_num_queues - 1);
2976 
2977     for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
2978         /* new_num_queues < old_num_queues */
2979         virtio_net_del_queue(n, i / 2);
2980     }
2981 
2982     for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
2983         /* new_num_queues > old_num_queues */
2984         virtio_net_add_queue(n, i / 2);
2985     }
2986 
2987     /* add ctrl_vq last */
2988     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2989 }
2990 
2991 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
2992 {
2993     int max = multiqueue ? n->max_queue_pairs : 1;
2994 
2995     n->multiqueue = multiqueue;
2996     virtio_net_change_num_queue_pairs(n, max);
2997 
2998     virtio_net_set_queue_pairs(n);
2999 }
3000 
3001 static int virtio_net_pre_load_queues(VirtIODevice *vdev)
3002 {
3003     virtio_net_set_multiqueue(VIRTIO_NET(vdev),
3004                               virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_RSS) ||
3005                               virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MQ));
3006 
3007     return 0;
3008 }
3009 
3010 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
3011                                         Error **errp)
3012 {
3013     VirtIONet *n = VIRTIO_NET(vdev);
3014     NetClientState *nc = qemu_get_queue(n->nic);
3015     uint32_t supported_hash_types = n->rss_data.supported_hash_types;
3016     uint32_t peer_hash_types = n->rss_data.peer_hash_types;
3017     bool use_own_hash =
3018         (supported_hash_types & VIRTIO_NET_RSS_SUPPORTED_HASHES) ==
3019         supported_hash_types;
3020     bool use_peer_hash =
3021         n->rss_data.peer_hash_available &&
3022         (supported_hash_types & peer_hash_types) == supported_hash_types;
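    /*
     * Roughly: use_own_hash means the device can compute every requested
     * hash type itself, while use_peer_hash means a vhost peer advertises
     * hashing that covers all requested types and can take over the work.
     */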
3023 
3024     /* First, sync all possibly supported virtio-net features */
3025     features |= n->host_features;
3026 
3027     virtio_add_feature(&features, VIRTIO_NET_F_MAC);
3028 
3029     if (!peer_has_vnet_hdr(n)) {
3030         virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
3031         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
3032         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
3033         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
3034 
3035         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
3036         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
3037         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
3038         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
3039 
3040         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
3041         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
3042         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
3043 
3044         virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
3045     }
3046 
3047     if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
3048         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
3049         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
3050     }
3051 
3052     if (!peer_has_uso(n)) {
3053         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
3054         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
3055         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
3056     }
3057 
3058     if (!get_vhost_net(nc->peer)) {
3059         if (!use_own_hash) {
3060             virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
3061             virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
3062         } else if (virtio_has_feature(features, VIRTIO_NET_F_RSS)) {
3063             virtio_net_load_ebpf(n, errp);
3064         }
3065 
3066         return features;
3067     }
3068 
3069     if (!use_peer_hash) {
3070         virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
3071 
3072         if (!use_own_hash || !virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
3073             if (!virtio_net_load_ebpf(n, errp)) {
3074                 return features;
3075             }
3076 
3077             virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
3078         }
3079     }
3080 
3081     features = vhost_net_get_features(get_vhost_net(nc->peer), features);
3082     vdev->backend_features = features;
3083 
3084     if (n->mtu_bypass_backend &&
3085             (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
3086         features |= (1ULL << VIRTIO_NET_F_MTU);
3087     }
3088 
3089     /*
3090      * Since GUEST_ANNOUNCE is emulated, the feature bit could be set even
3091      * though the backend never enabled it. This happens in the vDPA case.
3092      *
3093      * Make sure the feature set is not incoherent, as the driver could
3094      * refuse to start otherwise.
3095      *
3096      * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
3097      * helping the guest notify its new location with vDPA devices that do
3098      * not support it.
3099      */
3100     if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
3101         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
3102     }
3103 
3104     return features;
3105 }
3106 
3107 static int virtio_net_post_load_device(void *opaque, int version_id)
3108 {
3109     VirtIONet *n = opaque;
3110     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3111     int i, link_down;
3112 
3113     trace_virtio_net_post_load_device();
3114     virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
3115                                virtio_vdev_has_feature(vdev,
3116                                                        VIRTIO_F_VERSION_1),
3117                                virtio_vdev_has_feature(vdev,
3118                                                        VIRTIO_NET_F_HASH_REPORT));
3119 
3120     /* MAC_TABLE_ENTRIES may be different from the saved image */
3121     if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
3122         n->mac_table.in_use = 0;
3123     }
3124 
3125     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
3126         n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
3127     }
3128 
3129     /*
3130      * curr_guest_offloads will be later overwritten by the
3131      * virtio_set_features_nocheck call done from the virtio_load.
3132      * Here we make sure it is preserved and restored accordingly
3133      * in the virtio_net_post_load_virtio callback.
3134      */
3135     n->saved_guest_offloads = n->curr_guest_offloads;
3136 
3137     virtio_net_set_queue_pairs(n);
3138 
3139     /* Find the first multicast entry in the saved MAC filter */
3140     for (i = 0; i < n->mac_table.in_use; i++) {
3141         if (n->mac_table.macs[i * ETH_ALEN] & 1) {
3142             break;
3143         }
3144     }
3145     n->mac_table.first_multi = i;
3146 
3147     /* nc.link_down can't be migrated, so infer link_down from the
3148      * link status bit in n->status */
3149     link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
3150     for (i = 0; i < n->max_queue_pairs; i++) {
3151         qemu_get_subqueue(n->nic, i)->link_down = link_down;
3152     }
3153 
3154     if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
3155         virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3156         qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3157                                   QEMU_CLOCK_VIRTUAL,
3158                                   virtio_net_announce_timer, n);
3159         if (n->announce_timer.round) {
3160             timer_mod(n->announce_timer.tm,
3161                       qemu_clock_get_ms(n->announce_timer.type));
3162         } else {
3163             qemu_announce_timer_del(&n->announce_timer, false);
3164         }
3165     }
3166 
3167     virtio_net_commit_rss_config(n);
3168     return 0;
3169 }
3170 
3171 static int virtio_net_post_load_virtio(VirtIODevice *vdev)
3172 {
3173     VirtIONet *n = VIRTIO_NET(vdev);
3174     /*
3175      * The actual needed state is now in saved_guest_offloads,
3176      * see virtio_net_post_load_device for detail.
3177      * Restore it back and apply the desired offloads.
3178      */
3179     n->curr_guest_offloads = n->saved_guest_offloads;
3180     if (peer_has_vnet_hdr(n)) {
3181         virtio_net_apply_guest_offloads(n);
3182     }
3183 
3184     return 0;
3185 }
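
/*
 * Timeline of curr_guest_offloads across an incoming migration, as a rough
 * sketch (call names abbreviated):
 *
 *     virtio_load()
 *         -> vmstate load -> virtio_net_post_load_device()
 *                saved_guest_offloads = curr_guest_offloads;    (staged)
 *         -> virtio_set_features_nocheck()
 *                curr_guest_offloads = ...;                     (clobbered)
 *         -> vdc->post_load -> virtio_net_post_load_virtio()
 *                curr_guest_offloads = saved_guest_offloads;    (restored)
 *
 * This ordering is why the value is staged through saved_guest_offloads
 * instead of being applied directly in virtio_net_post_load_device().
 */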
3186 
3187 /* tx_waiting field of a VirtIONetQueue */
3188 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
3189     .name = "virtio-net-queue-tx_waiting",
3190     .fields = (const VMStateField[]) {
3191         VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
3192         VMSTATE_END_OF_LIST()
3193     },
3194 };
3195 
3196 static bool max_queue_pairs_gt_1(void *opaque, int version_id)
3197 {
3198     return VIRTIO_NET(opaque)->max_queue_pairs > 1;
3199 }
3200 
3201 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
3202 {
3203     return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
3204                                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3205 }
3206 
3207 static bool mac_table_fits(void *opaque, int version_id)
3208 {
3209     return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
3210 }
3211 
3212 static bool mac_table_doesnt_fit(void *opaque, int version_id)
3213 {
3214     return !mac_table_fits(opaque, version_id);
3215 }
3216 
3217 /* This temporary type is shared by all the WITH_TMP methods
3218  * although only some fields are used by each.
3219  */
3220 struct VirtIONetMigTmp {
3221     VirtIONet      *parent;
3222     VirtIONetQueue *vqs_1;
3223     uint16_t        curr_queue_pairs_1;
3224     uint8_t         has_ufo;
3225     uint32_t        has_vnet_hdr;
3226 };
3227 
3228 /* The 2nd and subsequent tx_waiting flags are loaded later than
3229  * the 1st entry in the queue_pairs and only if there's more than one
3230  * entry.  We use the tmp mechanism to calculate a temporary
3231  * pointer and count and also validate the count.
3232  */
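
/*
 * Rough sketch of the WITH_TMP mechanism used below (see VMSTATE_WITH_TMP
 * in include/migration/vmstate.h for the real implementation): migration
 * allocates a scratch struct VirtIONetMigTmp, points tmp->parent at the
 * VirtIONet being migrated, runs the pre_save/pre_load hook to derive the
 * temporary fields, and then saves or loads the fields of the wrapped
 * VMStateDescription.  Conceptually, on the save side:
 *
 *     struct VirtIONetMigTmp tmp = { .parent = n };
 *     virtio_net_tx_waiting_pre_save(&tmp);
 *     vmstate_save(... &vmstate_virtio_net_tx_waiting, &tmp ...);
 *
 * The scratch struct is discarded afterwards; nothing persists in it
 * between save and load.
 */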
3233 
3234 static int virtio_net_tx_waiting_pre_save(void *opaque)
3235 {
3236     struct VirtIONetMigTmp *tmp = opaque;
3237 
3238     tmp->vqs_1 = tmp->parent->vqs + 1;
3239     tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1;
3240     if (tmp->parent->curr_queue_pairs == 0) {
3241         tmp->curr_queue_pairs_1 = 0;
3242     }
3243 
3244     return 0;
3245 }
3246 
3247 static int virtio_net_tx_waiting_pre_load(void *opaque)
3248 {
3249     struct VirtIONetMigTmp *tmp = opaque;
3250 
3251     /* Reuse the pointer setup from save */
3252     virtio_net_tx_waiting_pre_save(opaque);
3253 
3254     if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
3255         error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
3256             tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
3257 
3258         return -EINVAL;
3259     }
3260 
3261     return 0; /* all good */
3262 }
3263 
3264 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
3265     .name      = "virtio-net-tx_waiting",
3266     .pre_load  = virtio_net_tx_waiting_pre_load,
3267     .pre_save  = virtio_net_tx_waiting_pre_save,
3268     .fields    = (const VMStateField[]) {
3269         VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
3270                                      curr_queue_pairs_1,
3271                                      vmstate_virtio_net_queue_tx_waiting,
3272                                      struct VirtIONetQueue),
3273         VMSTATE_END_OF_LIST()
3274     },
3275 };
3276 
3277 /* the 'has_ufo' flag is just tested; if the incoming stream has the
3278  * flag set we need to check that we have it
3279  */
3280 static int virtio_net_ufo_post_load(void *opaque, int version_id)
3281 {
3282     struct VirtIONetMigTmp *tmp = opaque;
3283 
3284     if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
3285         error_report("virtio-net: saved image requires TUN_F_UFO support");
3286         return -EINVAL;
3287     }
3288 
3289     return 0;
3290 }
3291 
3292 static int virtio_net_ufo_pre_save(void *opaque)
3293 {
3294     struct VirtIONetMigTmp *tmp = opaque;
3295 
3296     tmp->has_ufo = tmp->parent->has_ufo;
3297 
3298     return 0;
3299 }
3300 
3301 static const VMStateDescription vmstate_virtio_net_has_ufo = {
3302     .name      = "virtio-net-ufo",
3303     .post_load = virtio_net_ufo_post_load,
3304     .pre_save  = virtio_net_ufo_pre_save,
3305     .fields    = (const VMStateField[]) {
3306         VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
3307         VMSTATE_END_OF_LIST()
3308     },
3309 };
3310 
3311 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
3312  * flag set we need to check that we have it
3313  */
3314 static int virtio_net_vnet_post_load(void *opaque, int version_id)
3315 {
3316     struct VirtIONetMigTmp *tmp = opaque;
3317 
3318     if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
3319         error_report("virtio-net: saved image requires vnet_hdr=on");
3320         return -EINVAL;
3321     }
3322 
3323     return 0;
3324 }
3325 
3326 static int virtio_net_vnet_pre_save(void *opaque)
3327 {
3328     struct VirtIONetMigTmp *tmp = opaque;
3329 
3330     tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
3331 
3332     return 0;
3333 }
3334 
3335 static const VMStateDescription vmstate_virtio_net_has_vnet = {
3336     .name      = "virtio-net-vnet",
3337     .post_load = virtio_net_vnet_post_load,
3338     .pre_save  = virtio_net_vnet_pre_save,
3339     .fields    = (const VMStateField[]) {
3340         VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
3341         VMSTATE_END_OF_LIST()
3342     },
3343 };
3344 
3345 static int virtio_net_rss_post_load(void *opaque, int version_id)
3346 {
3347     VirtIONet *n = VIRTIO_NET(opaque);
3348 
3349     if (version_id == 1) {
3350         n->rss_data.supported_hash_types = VIRTIO_NET_RSS_SUPPORTED_HASHES;
3351     }
3352 
3353     return 0;
3354 }
3355 
3356 static bool virtio_net_rss_needed(void *opaque)
3357 {
3358     return VIRTIO_NET(opaque)->rss_data.enabled;
3359 }
3360 
3361 static const VMStateDescription vmstate_virtio_net_rss = {
3362     .name      = "virtio-net-device/rss",
3363     .version_id = 2,
3364     .minimum_version_id = 1,
3365     .post_load = virtio_net_rss_post_load,
3366     .needed = virtio_net_rss_needed,
3367     .fields = (const VMStateField[]) {
3368         VMSTATE_BOOL(rss_data.enabled, VirtIONet),
3369         VMSTATE_BOOL(rss_data.redirect, VirtIONet),
3370         VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
3371         VMSTATE_UINT32(rss_data.runtime_hash_types, VirtIONet),
3372         VMSTATE_UINT32_V(rss_data.supported_hash_types, VirtIONet, 2),
3373         VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
3374         VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
3375         VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
3376                             VIRTIO_NET_RSS_MAX_KEY_SIZE),
3377         VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
3378                                     rss_data.indirections_len, 0,
3379                                     vmstate_info_uint16, uint16_t),
3380         VMSTATE_END_OF_LIST()
3381     },
3382 };
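
/*
 * Subsection semantics, for readers unfamiliar with them: this description
 * only goes on the wire when .needed returns true on the source (i.e. RSS
 * is actually enabled), so destinations that never see RSS state still
 * interoperate.  Field versioning works the same way inside it; e.g. the
 * _V field above,
 *
 *     VMSTATE_UINT32_V(rss_data.supported_hash_types, VirtIONet, 2)
 *
 * is only loaded when the incoming subsection version is >= 2.  For
 * version 1 streams virtio_net_rss_post_load() fills in the legacy
 * default instead.
 */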
3383 
3384 static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
3385 {
3386     VirtIONet *n = VIRTIO_NET(vdev);
3387     NetClientState *nc;
3388     struct vhost_net *net;
3389 
3390     if (!n->nic) {
3391         return NULL;
3392     }
3393 
3394     nc = qemu_get_queue(n->nic);
3395     if (!nc) {
3396         return NULL;
3397     }
3398 
3399     net = get_vhost_net(nc->peer);
3400     if (!net) {
3401         return NULL;
3402     }
3403 
3404     return &net->dev;
3405 }
3406 
3407 static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
3408                                      const VMStateField *field,
3409                                      JSONWriter *vmdesc)
3410 {
3411     VirtIONet *n = pv;
3412     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3413     struct vhost_dev *vhdev;
3414     Error *local_error = NULL;
3415     int ret;
3416 
3417     vhdev = virtio_net_get_vhost(vdev);
3418     if (vhdev == NULL) {
3419         /* local_error is still NULL here; report directly */
3420         error_report("Error getting vhost back-end of %s device %s",
3421                      vdev->name, vdev->parent_obj.canonical_path);
3422         return -1;
3423     }
3424 
3425     ret = vhost_save_backend_state(vhdev, f, &local_error);
3426     if (ret < 0) {
3427         error_reportf_err(local_error,
3428                           "Error saving back-end state of %s device %s: ",
3429                           vdev->name, vdev->parent_obj.canonical_path);
3430         return ret;
3431     }
3432 
3433     return 0;
3434 }
3435 
3436 static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
3437                                      const VMStateField *field)
3438 {
3439     VirtIONet *n = pv;
3440     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3441     struct vhost_dev *vhdev;
3442     Error *local_error = NULL;
3443     int ret;
3444 
3445     vhdev = virtio_net_get_vhost(vdev);
3446     if (vhdev == NULL) {
3447         /* local_error is still NULL here; report directly */
3448         error_report("Error getting vhost back-end of %s device %s",
3449                      vdev->name, vdev->parent_obj.canonical_path);
3450         return -1;
3451     }
3452 
3453     ret = vhost_load_backend_state(vhdev, f, &local_error);
3454     if (ret < 0) {
3455         error_reportf_err(local_error,
3456                           "Error loading back-end state of %s device %s: ",
3457                           vdev->name, vdev->parent_obj.canonical_path);
3458         return ret;
3459     }
3460 
3461     return 0;
3462 }
3463 
3464 static bool vhost_user_net_is_internal_migration(void *opaque)
3465 {
3466     VirtIONet *n = opaque;
3467     VirtIODevice *vdev = VIRTIO_DEVICE(n);
3468     struct vhost_dev *vhdev;
3469 
3470     vhdev = virtio_net_get_vhost(vdev);
3471     if (vhdev == NULL) {
3472         return false;
3473     }
3474 
3475     return vhost_supports_device_state(vhdev);
3476 }
3477 
3478 static const VMStateDescription vhost_user_net_backend_state = {
3479     .name = "virtio-net-device/backend",
3480     .version_id = 0,
3481     .needed = vhost_user_net_is_internal_migration,
3482     .fields = (const VMStateField[]) {
3483         {
3484             .name = "backend",
3485             .info = &(const VMStateInfo) {
3486                 .name = "virtio-net vhost-user backend state",
3487                 .get = vhost_user_net_load_state,
3488                 .put = vhost_user_net_save_state,
3489             },
3490          },
3491          VMSTATE_END_OF_LIST()
3492     }
3493 };
3494 
3495 static const VMStateDescription vmstate_virtio_net_device = {
3496     .name = "virtio-net-device",
3497     .version_id = VIRTIO_NET_VM_VERSION,
3498     .minimum_version_id = VIRTIO_NET_VM_VERSION,
3499     .post_load = virtio_net_post_load_device,
3500     .fields = (const VMStateField[]) {
3501         VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
3502         VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
3503                                vmstate_virtio_net_queue_tx_waiting,
3504                                VirtIONetQueue),
3505         VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
3506         VMSTATE_UINT16(status, VirtIONet),
3507         VMSTATE_UINT8(promisc, VirtIONet),
3508         VMSTATE_UINT8(allmulti, VirtIONet),
3509         VMSTATE_UINT32(mac_table.in_use, VirtIONet),
3510 
3511         /* Guarded pair: if it fits we load it, else we throw it away
3512          * - can happen if the source has a larger MAC table; post-load
3513          * sets the flags in this case.  See the sketch following this
3514          * VMStateDescription. */
3515         VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
3516                                 0, mac_table_fits, mac_table.in_use,
3517                                  ETH_ALEN),
3518         VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
3519                                      mac_table.in_use, ETH_ALEN),
3520 
3521         /* Note: This is an array of uint32's that's always been saved as a
3522          * buffer; hold onto your endiannesses; it's actually used as a
3523          * bitmap built on top of the uint32 array.
3524          */
3525         VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
3526         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3527                          vmstate_virtio_net_has_vnet),
3528         VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
3529         VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
3530         VMSTATE_UINT8(alluni, VirtIONet),
3531         VMSTATE_UINT8(nomulti, VirtIONet),
3532         VMSTATE_UINT8(nouni, VirtIONet),
3533         VMSTATE_UINT8(nobcast, VirtIONet),
3534         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3535                          vmstate_virtio_net_has_ufo),
3536         VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0,
3537                             vmstate_info_uint16_equal, uint16_t),
3538         VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1),
3539         VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3540                          vmstate_virtio_net_tx_waiting),
3541         VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
3542                             has_ctrl_guest_offloads),
3543         VMSTATE_END_OF_LIST()
3544     },
3545     .subsections = (const VMStateDescription * const []) {
3546         &vmstate_virtio_net_rss,
3547         &vhost_user_net_backend_state,
3548         NULL
3549     }
3550 };
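
/*
 * Sketch of the guarded-pair pattern used for the MAC table above: a
 * VMState field may carry a test callback and is only saved/loaded when
 * that callback returns true.  Pairing a guarded buffer with a guarded
 * "unused" entry of the same size keeps the stream position consistent
 * either way.  Conceptually, on load:
 *
 *     if (mac_table_fits(n, version)) {
 *         read mac_table.in_use * ETH_ALEN bytes into mac_table.macs;
 *     } else {
 *         skip mac_table.in_use * ETH_ALEN bytes;
 *         (virtio_net_post_load_device() later resets in_use to 0)
 *     }
 */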
3551 
3552 static NetClientInfo net_virtio_info = {
3553     .type = NET_CLIENT_DRIVER_NIC,
3554     .size = sizeof(NICState),
3555     .can_receive = virtio_net_can_receive,
3556     .receive = virtio_net_receive,
3557     .link_status_changed = virtio_net_set_link_status,
3558     .query_rx_filter = virtio_net_query_rxfilter,
3559     .announce = virtio_net_announce,
3560 };
3561 
3562 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
3563 {
3564     VirtIONet *n = VIRTIO_NET(vdev);
3565     NetClientState *nc;
3566     assert(n->vhost_started);
3567     if (!n->multiqueue && idx == 2) {
3568         /* Must guard against an invalid feature set and a bogus queue
3569          * index set by a malicious guest, or smuggled in through a buggy
3570          * migration stream.
3571          */
3572         if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3573             qemu_log_mask(LOG_GUEST_ERROR,
3574                           "%s: bogus vq index ignored\n", __func__);
3575             return false;
3576         }
3577         nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3578     } else {
3579         nc = qemu_get_subqueue(n->nic, vq2q(idx));
3580     }
3581     /*
3582      * Check for the config interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used
3583      * as the index of the config interrupt. If the backend does not
3584      * support it, the function returns false.
3585      */
3586 
3587     if (idx == VIRTIO_CONFIG_IRQ_IDX) {
3588         return vhost_net_config_pending(get_vhost_net(nc->peer));
3589     }
3590     return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
3591 }
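
/*
 * Virtqueue index layout assumed by the idx handling above (vq2q() maps a
 * vq index to its queue pair by dividing by two):
 *
 *     idx 2n   -> rx of queue pair n
 *     idx 2n+1 -> tx of queue pair n
 *     idx 2 * max_queue_pairs -> control vq
 *
 * For a non-multiqueue device idx 2 can only legitimately refer to the
 * control vq, hence the VIRTIO_NET_F_CTRL_VQ check before resolving the
 * peer for it.
 */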
3592 
3593 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
3594                                            bool mask)
3595 {
3596     VirtIONet *n = VIRTIO_NET(vdev);
3597     NetClientState *nc;
3598     assert(n->vhost_started);
3599     if (!n->multiqueue && idx == 2) {
3600         /* Must guard against an invalid feature set and a bogus queue
3601          * index set by a malicious guest, or smuggled in through a buggy
3602          * migration stream.
3603          */
3604         if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3605             qemu_log_mask(LOG_GUEST_ERROR,
3606                           "%s: bogus vq index ignored\n", __func__);
3607             return;
3608         }
3609         nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3610     } else {
3611         nc = qemu_get_subqueue(n->nic, vq2q(idx));
3612     }
3613     /*
3614      * Check for the config interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used
3615      * as the index of the config interrupt. If the backend does not
3616      * support it, the function returns without masking anything.
3617      */
3618 
3619     if (idx == VIRTIO_CONFIG_IRQ_IDX) {
3620         vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
3621         return;
3622     }
3623     vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
3624 }
3625 
3626 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
3627 {
3628     virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
3629 
3630     n->config_size = virtio_get_config_size(&cfg_size_params, host_features);
3631 }
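
/*
 * virtio_get_config_size() trims the config space down to the last field
 * implied by the offered features.  A minimal sketch of the idea, assuming
 * a feature-to-end-offset table like the one behind cfg_size_params
 * (illustrative, not the exact structures):
 *
 *     size = min_size;
 *     for each (flag_mask, end_offset) in feature_sizes:
 *         if (host_features & flag_mask)
 *             size = MAX(size, end_offset);
 *
 * VIRTIO_NET_F_MAC is forced on here only for this size computation, so
 * the config space always covers the mac field even if the feature bit
 * itself is not offered to the guest.
 */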
3632 
3633 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
3634                                    const char *type)
3635 {
3636     /*
3637      * The name may be NULL; in that case the netclient name will be type.x.
3638      */
3639     assert(type != NULL);
3640 
3641     g_free(n->netclient_name);
3642     g_free(n->netclient_type);
3643     n->netclient_name = g_strdup(name);
3644     n->netclient_type = g_strdup(type);
3645 }
3646 
3647 static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev)
3648 {
3649     HotplugHandler *hotplug_ctrl;
3650     PCIDevice *pci_dev;
3651     Error *err = NULL;
3652 
3653     hotplug_ctrl = qdev_get_hotplug_handler(dev);
3654     if (hotplug_ctrl) {
3655         pci_dev = PCI_DEVICE(dev);
3656         pci_dev->partially_hotplugged = true;
3657         hotplug_handler_unplug_request(hotplug_ctrl, dev, &err);
3658         if (err) {
3659             error_report_err(err);
3660             return false;
3661         }
3662     } else {
3663         return false;
3664     }
3665     return true;
3666 }
3667 
3668 static bool failover_replug_primary(VirtIONet *n, DeviceState *dev,
3669                                     Error **errp)
3670 {
3671     Error *err = NULL;
3672     HotplugHandler *hotplug_ctrl;
3673     PCIDevice *pdev = PCI_DEVICE(dev);
3674     BusState *primary_bus;
3675 
3676     if (!pdev->partially_hotplugged) {
3677         return true;
3678     }
3679     primary_bus = dev->parent_bus;
3680     if (!primary_bus) {
3681         error_setg(errp, "virtio_net: couldn't find primary bus");
3682         return false;
3683     }
3684     qdev_set_parent_bus(dev, primary_bus, &error_abort);
3685     qatomic_set(&n->failover_primary_hidden, false);
3686     hotplug_ctrl = qdev_get_hotplug_handler(dev);
3687     if (hotplug_ctrl) {
3688         hotplug_handler_pre_plug(hotplug_ctrl, dev, &err);
3689         if (err) {
3690             goto out;
3691         }
3692         hotplug_handler_plug(hotplug_ctrl, dev, &err);
3693     }
3694     pdev->partially_hotplugged = false;
3695 
3696 out:
3697     error_propagate(errp, err);
3698     return !err;
3699 }
3700 
3701 static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationEvent *e)
3702 {
3703     bool should_be_hidden;
3704     Error *err = NULL;
3705     DeviceState *dev = failover_find_primary_device(n);
3706 
3707     if (!dev) {
3708         return;
3709     }
3710 
3711     should_be_hidden = qatomic_read(&n->failover_primary_hidden);
3712 
3713     if (e->type == MIG_EVENT_PRECOPY_SETUP && !should_be_hidden) {
3714         if (failover_unplug_primary(n, dev)) {
3715             vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev);
3716             qapi_event_send_unplug_primary(dev->id);
3717             qatomic_set(&n->failover_primary_hidden, true);
3718         } else {
3719             warn_report("couldn't unplug primary device");
3720         }
3721     } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
3722         /* We already unplugged the device; let's plug it back */
3723         if (!failover_replug_primary(n, dev, &err)) {
3724             if (err) {
3725                 error_report_err(err);
3726             }
3727         }
3728     }
3729 }
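
/*
 * Failover event flow handled above, in rough outline:
 *
 *     MIG_EVENT_PRECOPY_SETUP  -> request hot-unplug of the primary,
 *                                 hide it and emit UNPLUG_PRIMARY
 *     MIG_EVENT_PRECOPY_FAILED -> the primary was already unplugged,
 *                                 plug it back in
 *
 * Other migration events are ignored here; on a successful migration the
 * source simply leaves the primary unplugged.
 */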
3730 
3731 static int virtio_net_migration_state_notifier(NotifierWithReturn *notifier,
3732                                                MigrationEvent *e, Error **errp)
3733 {
3734     VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
3735     virtio_net_handle_migration_primary(n, e);
3736     return 0;
3737 }
3738 
3739 static bool failover_hide_primary_device(DeviceListener *listener,
3740                                          const QDict *device_opts,
3741                                          bool from_json,
3742                                          Error **errp)
3743 {
3744     VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
3745     const char *standby_id;
3746 
3747     if (!device_opts) {
3748         return false;
3749     }
3750 
3751     if (!qdict_haskey(device_opts, "failover_pair_id")) {
3752         return false;
3753     }
3754 
3755     if (!qdict_haskey(device_opts, "id")) {
3756         error_setg(errp, "Device with failover_pair_id needs to have id");
3757         return false;
3758     }
3759 
3760     standby_id = qdict_get_str(device_opts, "failover_pair_id");
3761     if (g_strcmp0(standby_id, n->netclient_name) != 0) {
3762         return false;
3763     }
3764 
3765     /*
3766      * The hide helper can be called several times for a given device.
3767      * Check there is only one primary for a virtio-net device but
3768      * don't duplicate the qdict several times if it's called for the same
3769      * device.
3770      */
3771     if (n->primary_opts) {
3772         const char *old, *new;
3773         /* devices with failover_pair_id always have an id */
3774         old = qdict_get_str(n->primary_opts, "id");
3775         new = qdict_get_str(device_opts, "id");
3776         if (strcmp(old, new) != 0) {
3777             error_setg(errp, "Cannot attach more than one primary device to "
3778                        "'%s': '%s' and '%s'", n->netclient_name, old, new);
3779             return false;
3780         }
3781     } else {
3782         n->primary_opts = qdict_clone_shallow(device_opts);
3783         n->primary_opts_from_json = from_json;
3784     }
3785 
3786     /* failover_primary_hidden is set during feature negotiation */
3787     return qatomic_read(&n->failover_primary_hidden);
3788 }
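
/*
 * Typical failover pairing on the command line, as an illustrative example
 * of what this listener matches against (ids and the host address are
 * placeholders):
 *
 *     -device virtio-net-pci,netdev=net0,id=standby0,failover=on
 *     -device vfio-pci,host=01:00.0,id=primary0,failover_pair_id=standby0
 *
 * The primary (here vfio-pci) device is the one hidden at startup and
 * hot-unplugged before migration; this callback hides it whenever its
 * failover_pair_id names this virtio-net device's netclient.
 */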
3789 
3790 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
3791 {
3792     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3793     VirtIONet *n = VIRTIO_NET(dev);
3794     NetClientState *nc;
3795     int i;
3796 
3797     if (n->net_conf.mtu) {
3798         n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
3799     }
3800 
3801     if (n->net_conf.duplex_str) {
3802         if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
3803             n->net_conf.duplex = DUPLEX_HALF;
3804         } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
3805             n->net_conf.duplex = DUPLEX_FULL;
3806         } else {
3807             error_setg(errp, "'duplex' must be 'half' or 'full'");
3808             return;
3809         }
3810         n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3811     } else {
3812         n->net_conf.duplex = DUPLEX_UNKNOWN;
3813     }
3814 
3815     if (n->net_conf.speed < SPEED_UNKNOWN) {
3816         error_setg(errp, "'speed' must be between 0 and INT_MAX");
3817         return;
3818     }
3819     if (n->net_conf.speed >= 0) {
3820         n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3821     }
3822 
3823     if (n->failover) {
3824         n->primary_listener.hide_device = failover_hide_primary_device;
3825         qatomic_set(&n->failover_primary_hidden, true);
3826         device_listener_register(&n->primary_listener);
3827         migration_add_notifier(&n->migration_state,
3828                                virtio_net_migration_state_notifier);
3829         n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
3830     }
3831 
3832     virtio_net_set_config_size(n, n->host_features);
3833     virtio_init(vdev, VIRTIO_ID_NET, n->config_size);
3834 
3835     /*
3836      * We set a lower limit on RX queue size to what it always was.
3837      * Guests that want a smaller ring can always resize it without
3838      * help from us (using virtio 1 and up).
3839      */
3840     if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
3841         n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
3842         !is_power_of_2(n->net_conf.rx_queue_size)) {
3843         error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
3844                    "must be a power of 2 between %d and %d.",
3845                    n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
3846                    VIRTQUEUE_MAX_SIZE);
3847         virtio_cleanup(vdev);
3848         return;
3849     }
3850 
3851     if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
3852         n->net_conf.tx_queue_size > virtio_net_max_tx_queue_size(n) ||
3853         !is_power_of_2(n->net_conf.tx_queue_size)) {
3854         error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
3855                    "must be a power of 2 between %d and %d",
3856                    n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
3857                    virtio_net_max_tx_queue_size(n));
3858         virtio_cleanup(vdev);
3859         return;
3860     }
3861 
3862     n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
3863 
3864     /*
3865      * Figure out the datapath queue pairs since the backend could
3866      * provide a control queue via peers as well.
3867      */
3868     if (n->nic_conf.peers.queues) {
3869         for (i = 0; i < n->max_ncs; i++) {
3870             if (n->nic_conf.peers.ncs[i]->is_datapath) {
3871                 ++n->max_queue_pairs;
3872             }
3873         }
3874     }
3875     n->max_queue_pairs = MAX(n->max_queue_pairs, 1);
3876 
3877     if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) {
3878         error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), "
3879                    "must be a positive integer less than %d.",
3880                    n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2);
3881         virtio_cleanup(vdev);
3882         return;
3883     }
3884     n->vqs = g_new0(VirtIONetQueue, n->max_queue_pairs);
3885     n->curr_queue_pairs = 1;
3886     n->tx_timeout = n->net_conf.txtimer;
3887 
3888     if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
3889                        && strcmp(n->net_conf.tx, "bh")) {
3890         warn_report("virtio-net: "
3891                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
3892                     n->net_conf.tx);
3893         error_printf("Defaulting to \"bh\"");
3894     }
3895 
3896     n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
3897                                     n->net_conf.tx_queue_size);
3898 
3899     virtio_net_add_queue(n, 0);
3900 
3901     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
3902     qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
3903     memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
3904     n->status = VIRTIO_NET_S_LINK_UP;
3905     qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3906                               QEMU_CLOCK_VIRTUAL,
3907                               virtio_net_announce_timer, n);
3908     n->announce_timer.round = 0;
3909 
3910     if (n->netclient_type) {
3911         /*
3912          * This happens when virtio_net_set_netclient_name has been called.
3913          */
3914         n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3915                               n->netclient_type, n->netclient_name,
3916                               &dev->mem_reentrancy_guard, n);
3917     } else {
3918         n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3919                               object_get_typename(OBJECT(dev)), dev->id,
3920                               &dev->mem_reentrancy_guard, n);
3921     }
3922 
3923     for (i = 0; i < n->max_queue_pairs; i++) {
3924         n->nic->ncs[i].do_not_pad = true;
3925     }
3926 
3927     peer_test_vnet_hdr(n);
3928     if (peer_has_vnet_hdr(n)) {
3929         n->host_hdr_len = sizeof(struct virtio_net_hdr);
3930     } else {
3931         n->host_hdr_len = 0;
3932     }
3933 
3934     qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
3935 
3936     n->vqs[0].tx_waiting = 0;
3937     n->tx_burst = n->net_conf.txburst;
3938     virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
3939     n->promisc = 1; /* for compatibility */
3940 
3941     n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
3942 
3943     n->vlans = g_malloc0(MAX_VLAN >> 3);
3944 
3945     nc = qemu_get_queue(n->nic);
3946     nc->rxfilter_notify_enabled = 1;
3947 
3948     if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
3949         struct virtio_net_config netcfg = {};
3950         memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
3951         vhost_net_set_config(get_vhost_net(nc->peer),
3952             (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_FRONTEND);
3953     }
3954     QTAILQ_INIT(&n->rsc_chains);
3955     n->qdev = dev;
3956 
3957     net_rx_pkt_init(&n->rx_pkt);
3958 
3959     if (qemu_get_vnet_hash_supported_types(qemu_get_queue(n->nic)->peer,
3960                                            &n->rss_data.peer_hash_types)) {
3961         n->rss_data.peer_hash_available = true;
3962         n->rss_data.supported_hash_types =
3963             n->rss_data.specified_hash_types.on_bits |
3964             (n->rss_data.specified_hash_types.auto_bits &
3965              n->rss_data.peer_hash_types);
3966     } else {
3967         n->rss_data.supported_hash_types =
3968             n->rss_data.specified_hash_types.on_bits |
3969             n->rss_data.specified_hash_types.auto_bits;
3970     }
3971 }
3972 
3973 static void virtio_net_device_unrealize(DeviceState *dev)
3974 {
3975     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3976     VirtIONet *n = VIRTIO_NET(dev);
3977     int i, max_queue_pairs;
3978 
3979     if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3980         virtio_net_unload_ebpf(n);
3981     }
3982 
3983     /* This will stop vhost backend if appropriate. */
3984     virtio_net_set_status(vdev, 0);
3985 
3986     g_free(n->netclient_name);
3987     n->netclient_name = NULL;
3988     g_free(n->netclient_type);
3989     n->netclient_type = NULL;
3990 
3991     g_free(n->mac_table.macs);
3992     g_free(n->vlans);
3993 
3994     if (n->failover) {
3995         qobject_unref(n->primary_opts);
3996         device_listener_unregister(&n->primary_listener);
3997         migration_remove_notifier(&n->migration_state);
3998     } else {
3999         assert(n->primary_opts == NULL);
4000     }
4001 
4002     max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
4003     for (i = 0; i < max_queue_pairs; i++) {
4004         virtio_net_del_queue(n, i);
4005     }
4006     /* delete also control vq */
4007     virtio_del_queue(vdev, max_queue_pairs * 2);
4008     qemu_announce_timer_del(&n->announce_timer, false);
4009     g_free(n->vqs);
4010     qemu_del_nic(n->nic);
4011     virtio_net_rsc_cleanup(n);
4012     g_free(n->rss_data.indirections_table);
4013     net_rx_pkt_uninit(n->rx_pkt);
4014     virtio_cleanup(vdev);
4015 }
4016 
4017 static void virtio_net_reset(VirtIODevice *vdev)
4018 {
4019     VirtIONet *n = VIRTIO_NET(vdev);
4020     int i;
4021 
4022     /* Reset back to compatibility mode */
4023     n->promisc = 1;
4024     n->allmulti = 0;
4025     n->alluni = 0;
4026     n->nomulti = 0;
4027     n->nouni = 0;
4028     n->nobcast = 0;
4029     /* multiqueue is disabled by default */
4030     n->curr_queue_pairs = 1;
4031     timer_del(n->announce_timer.tm);
4032     n->announce_timer.round = 0;
4033     n->status &= ~VIRTIO_NET_S_ANNOUNCE;
4034 
4035     /* Flush any MAC and VLAN filter table state */
4036     n->mac_table.in_use = 0;
4037     n->mac_table.first_multi = 0;
4038     n->mac_table.multi_overflow = 0;
4039     n->mac_table.uni_overflow = 0;
4040     memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
4041     memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
4042     qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
4043     memset(n->vlans, 0, MAX_VLAN >> 3);
4044 
4045     /* Flush any async TX */
4046     for (i = 0;  i < n->max_queue_pairs; i++) {
4047         flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
4048     }
4049 
4050     virtio_net_disable_rss(n);
4051 }
4052 
4053 static void virtio_net_instance_init(Object *obj)
4054 {
4055     VirtIONet *n = VIRTIO_NET(obj);
4056 
4057     /*
4058      * The default config_size is sizeof(struct virtio_net_config).
4059      * Can be overridden with virtio_net_set_config_size.
4060      */
4061     n->config_size = sizeof(struct virtio_net_config);
4062     device_add_bootindex_property(obj, &n->nic_conf.bootindex,
4063                                   "bootindex", "/ethernet-phy@0",
4064                                   DEVICE(n));
4065 
4066     ebpf_rss_init(&n->ebpf_rss);
4067 }
4068 
4069 static int virtio_net_pre_save(void *opaque)
4070 {
4071     VirtIONet *n = opaque;
4072 
4073     /* At this point, the backend must be stopped; otherwise
4074      * it might keep writing to memory. */
4075     assert(!n->vhost_started);
4076 
4077     return 0;
4078 }
4079 
4080 static bool primary_unplug_pending(void *opaque)
4081 {
4082     DeviceState *dev = opaque;
4083     DeviceState *primary;
4084     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
4085     VirtIONet *n = VIRTIO_NET(vdev);
4086 
4087     if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4088         return false;
4089     }
4090     primary = failover_find_primary_device(n);
4091     return primary ? primary->pending_deleted_event : false;
4092 }
4093 
4094 static bool dev_unplug_pending(void *opaque)
4095 {
4096     DeviceState *dev = opaque;
4097     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
4098 
4099     return vdc->primary_unplug_pending(dev);
4100 }
4101 
4102 static const VMStateDescription vmstate_virtio_net = {
4103     .name = "virtio-net",
4104     .minimum_version_id = VIRTIO_NET_VM_VERSION,
4105     .version_id = VIRTIO_NET_VM_VERSION,
4106     .fields = (const VMStateField[]) {
4107         VMSTATE_VIRTIO_DEVICE,
4108         VMSTATE_END_OF_LIST()
4109     },
4110     .pre_save = virtio_net_pre_save,
4111     .dev_unplug_pending = dev_unplug_pending,
4112 };
4113 
4114 static const Property virtio_net_properties[] = {
4115     DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
4116                     VIRTIO_NET_F_CSUM, true),
4117     DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
4118                     VIRTIO_NET_F_GUEST_CSUM, true),
4119     DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
4120     DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
4121                     VIRTIO_NET_F_GUEST_TSO4, true),
4122     DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
4123                     VIRTIO_NET_F_GUEST_TSO6, true),
4124     DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
4125                     VIRTIO_NET_F_GUEST_ECN, true),
4126     DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
4127                     VIRTIO_NET_F_GUEST_UFO, true),
4128     DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
4129                     VIRTIO_NET_F_GUEST_ANNOUNCE, true),
4130     DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
4131                     VIRTIO_NET_F_HOST_TSO4, true),
4132     DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
4133                     VIRTIO_NET_F_HOST_TSO6, true),
4134     DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
4135                     VIRTIO_NET_F_HOST_ECN, true),
4136     DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
4137                     VIRTIO_NET_F_HOST_UFO, true),
4138     DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
4139                     VIRTIO_NET_F_MRG_RXBUF, true),
4140     DEFINE_PROP_BIT64("status", VirtIONet, host_features,
4141                     VIRTIO_NET_F_STATUS, true),
4142     DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
4143                     VIRTIO_NET_F_CTRL_VQ, true),
4144     DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
4145                     VIRTIO_NET_F_CTRL_RX, true),
4146     DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
4147                     VIRTIO_NET_F_CTRL_VLAN, true),
4148     DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
4149                     VIRTIO_NET_F_CTRL_RX_EXTRA, true),
4150     DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
4151                     VIRTIO_NET_F_CTRL_MAC_ADDR, true),
4152     DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
4153                     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
4154     DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
4155     DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
4156                     VIRTIO_NET_F_RSS, false),
4157     DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
4158                     VIRTIO_NET_F_HASH_REPORT, false),
4159     DEFINE_PROP_ARRAY("ebpf-rss-fds", VirtIONet, nr_ebpf_rss_fds,
4160                       ebpf_rss_fds, qdev_prop_string, char*),
4161     DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
4162                     VIRTIO_NET_F_RSC_EXT, false),
4163     DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
4164                        VIRTIO_NET_RSC_DEFAULT_INTERVAL),
4165     DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
4166     DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
4167                        TX_TIMER_INTERVAL),
4168     DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
4169     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
4170     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
4171                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
4172     DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
4173                        VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
4174     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
4175     DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
4176                      true),
4177     DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
4178     DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
4179     DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
4180     DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features,
4181                       VIRTIO_NET_F_GUEST_USO4, true),
4182     DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features,
4183                       VIRTIO_NET_F_GUEST_USO6, true),
4184     DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
4185                       VIRTIO_NET_F_HOST_USO, true),
4186     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv4", VirtIONet,
4187                                   rss_data.specified_hash_types,
4188                                   VIRTIO_NET_HASH_REPORT_IPv4 - 1,
4189                                   ON_OFF_AUTO_AUTO),
4190     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp4", VirtIONet,
4191                                   rss_data.specified_hash_types,
4192                                   VIRTIO_NET_HASH_REPORT_TCPv4 - 1,
4193                                   ON_OFF_AUTO_AUTO),
4194     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp4", VirtIONet,
4195                                   rss_data.specified_hash_types,
4196                                   VIRTIO_NET_HASH_REPORT_UDPv4 - 1,
4197                                   ON_OFF_AUTO_AUTO),
4198     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv6", VirtIONet,
4199                                   rss_data.specified_hash_types,
4200                                   VIRTIO_NET_HASH_REPORT_IPv6 - 1,
4201                                   ON_OFF_AUTO_AUTO),
4202     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp6", VirtIONet,
4203                                   rss_data.specified_hash_types,
4204                                   VIRTIO_NET_HASH_REPORT_TCPv6 - 1,
4205                                   ON_OFF_AUTO_AUTO),
4206     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp6", VirtIONet,
4207                                   rss_data.specified_hash_types,
4208                                   VIRTIO_NET_HASH_REPORT_UDPv6 - 1,
4209                                   ON_OFF_AUTO_AUTO),
4210     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv6ex", VirtIONet,
4211                                   rss_data.specified_hash_types,
4212                                   VIRTIO_NET_HASH_REPORT_IPv6_EX - 1,
4213                                   ON_OFF_AUTO_AUTO),
4214     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp6ex", VirtIONet,
4215                                   rss_data.specified_hash_types,
4216                                   VIRTIO_NET_HASH_REPORT_TCPv6_EX - 1,
4217                                   ON_OFF_AUTO_AUTO),
4218     DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp6ex", VirtIONet,
4219                                   rss_data.specified_hash_types,
4220                                   VIRTIO_NET_HASH_REPORT_UDPv6_EX - 1,
4221                                   ON_OFF_AUTO_AUTO),
4222 };
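
/*
 * These properties are set per device on the command line, e.g. (an
 * illustrative combination, not a recommendation):
 *
 *     -device virtio-net-pci,netdev=net0,mq=on,rss=on,rx_queue_size=1024
 *
 * Bits declared with DEFINE_PROP_BIT64 toggle individual host_features
 * flags.  The ON_OFF_AUTO hash-* properties additionally accept "auto",
 * which realize resolves against the peer's reported hash capabilities,
 * falling back to enabled when the peer reports none.
 */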
4223 
4224 static void virtio_net_class_init(ObjectClass *klass, const void *data)
4225 {
4226     DeviceClass *dc = DEVICE_CLASS(klass);
4227     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
4228 
4229     device_class_set_props(dc, virtio_net_properties);
4230     dc->vmsd = &vmstate_virtio_net;
4231     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
4232     vdc->realize = virtio_net_device_realize;
4233     vdc->unrealize = virtio_net_device_unrealize;
4234     vdc->get_config = virtio_net_get_config;
4235     vdc->set_config = virtio_net_set_config;
4236     vdc->get_features = virtio_net_get_features;
4237     vdc->set_features = virtio_net_set_features;
4238     vdc->bad_features = virtio_net_bad_features;
4239     vdc->reset = virtio_net_reset;
4240     vdc->queue_reset = virtio_net_queue_reset;
4241     vdc->queue_enable = virtio_net_queue_enable;
4242     vdc->set_status = virtio_net_set_status;
4243     vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
4244     vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
4245     vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
4246     vdc->pre_load_queues = virtio_net_pre_load_queues;
4247     vdc->post_load = virtio_net_post_load_virtio;
4248     vdc->vmsd = &vmstate_virtio_net_device;
4249     vdc->primary_unplug_pending = primary_unplug_pending;
4250     vdc->get_vhost = virtio_net_get_vhost;
4251     vdc->toggle_device_iotlb = vhost_toggle_device_iotlb;
4252 }
4253 
4254 static const TypeInfo virtio_net_info = {
4255     .name = TYPE_VIRTIO_NET,
4256     .parent = TYPE_VIRTIO_DEVICE,
4257     .instance_size = sizeof(VirtIONet),
4258     .instance_init = virtio_net_instance_init,
4259     .class_init = virtio_net_class_init,
4260 };
4261 
4262 static void virtio_register_types(void)
4263 {
4264     type_register_static(&virtio_net_info);
4265 }
4266 
4267 type_init(virtio_register_types)
4268