xref: /openbmc/qemu/net/vhost-vdpa.c (revision 6c848c19)
1 /*
2  * vhost-vdpa.c
3  *
4  * Copyright(c) 2017-2018 Intel Corporation.
5  * Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include "clients.h"
14 #include "hw/virtio/virtio-net.h"
15 #include "net/vhost_net.h"
16 #include "net/vhost-vdpa.h"
17 #include "hw/virtio/vhost-vdpa.h"
18 #include "qemu/config-file.h"
19 #include "qemu/error-report.h"
20 #include "qemu/log.h"
21 #include "qemu/memalign.h"
22 #include "qemu/option.h"
23 #include "qapi/error.h"
24 #include <linux/vhost.h>
25 #include <sys/ioctl.h>
26 #include <err.h>
27 #include "standard-headers/linux/virtio_net.h"
28 #include "monitor/monitor.h"
29 #include "migration/migration.h"
30 #include "migration/misc.h"
31 #include "hw/virtio/vhost.h"
32 
33 /* TODO: add multiqueue support here */
34 typedef struct VhostVDPAState {
35     NetClientState nc;
36     struct vhost_vdpa vhost_vdpa;
37     Notifier migration_state;
38     VHostNetState *vhost_net;
39 
40     /* Control commands shadow buffers */
41     void *cvq_cmd_out_buffer;
42     virtio_net_ctrl_ack *status;
43 
44     /* The device always has SVQ enabled */
45     bool always_svq;
46 
47     /* The device can isolate CVQ in its own ASID */
48     bool cvq_isolated;
49 
50     bool started;
51 } VhostVDPAState;
52 
53 /*
54  * The array is sorted alphabetically in ascending order,
55  * with the exception of VHOST_INVALID_FEATURE_BIT,
56  * which should always be the last entry.
57  */
58 const int vdpa_feature_bits[] = {
59     VIRTIO_F_ANY_LAYOUT,
60     VIRTIO_F_IOMMU_PLATFORM,
61     VIRTIO_F_NOTIFY_ON_EMPTY,
62     VIRTIO_F_RING_PACKED,
63     VIRTIO_F_RING_RESET,
64     VIRTIO_F_VERSION_1,
65     VIRTIO_NET_F_CSUM,
66     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
67     VIRTIO_NET_F_CTRL_MAC_ADDR,
68     VIRTIO_NET_F_CTRL_RX,
69     VIRTIO_NET_F_CTRL_RX_EXTRA,
70     VIRTIO_NET_F_CTRL_VLAN,
71     VIRTIO_NET_F_CTRL_VQ,
72     VIRTIO_NET_F_GSO,
73     VIRTIO_NET_F_GUEST_CSUM,
74     VIRTIO_NET_F_GUEST_ECN,
75     VIRTIO_NET_F_GUEST_TSO4,
76     VIRTIO_NET_F_GUEST_TSO6,
77     VIRTIO_NET_F_GUEST_UFO,
78     VIRTIO_NET_F_GUEST_USO4,
79     VIRTIO_NET_F_GUEST_USO6,
80     VIRTIO_NET_F_HASH_REPORT,
81     VIRTIO_NET_F_HOST_ECN,
82     VIRTIO_NET_F_HOST_TSO4,
83     VIRTIO_NET_F_HOST_TSO6,
84     VIRTIO_NET_F_HOST_UFO,
85     VIRTIO_NET_F_HOST_USO,
86     VIRTIO_NET_F_MQ,
87     VIRTIO_NET_F_MRG_RXBUF,
88     VIRTIO_NET_F_MTU,
89     VIRTIO_NET_F_RSS,
90     VIRTIO_NET_F_STATUS,
91     VIRTIO_RING_F_EVENT_IDX,
92     VIRTIO_RING_F_INDIRECT_DESC,
93 
94     /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
95     VHOST_INVALID_FEATURE_BIT
96 };
97 
98 /** Supported device-specific feature bits with SVQ */
99 static const uint64_t vdpa_svq_device_features =
100     BIT_ULL(VIRTIO_NET_F_CSUM) |
101     BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
102     BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
103     BIT_ULL(VIRTIO_NET_F_MTU) |
104     BIT_ULL(VIRTIO_NET_F_MAC) |
105     BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
106     BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
107     BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
108     BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
109     BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
110     BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
111     BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
112     BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
113     BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
114     BIT_ULL(VIRTIO_NET_F_STATUS) |
115     BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
116     BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
117     BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
118     BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
119     BIT_ULL(VIRTIO_NET_F_MQ) |
120     BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
121     BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
122     /* VHOST_F_LOG_ALL is exposed by SVQ */
123     BIT_ULL(VHOST_F_LOG_ALL) |
124     BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
125     BIT_ULL(VIRTIO_NET_F_RSS) |
126     BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
127     BIT_ULL(VIRTIO_NET_F_STANDBY) |
128     BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);
129 
130 #define VHOST_VDPA_NET_CVQ_ASID 1
131 
132 VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
133 {
134     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
135     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
136     return s->vhost_net;
137 }
138 
139 static size_t vhost_vdpa_net_cvq_cmd_len(void)
140 {
141     /*
142      * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
143      * The in buffer is always 1 byte, so it always fits here.
144      */
145     return sizeof(struct virtio_net_ctrl_hdr) +
146            2 * sizeof(struct virtio_net_ctrl_mac) +
147            MAC_TABLE_ENTRIES * ETH_ALEN;
148 }
149 
150 static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
151 {
152     return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
153 }
154 
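/**
 * Check whether the device features can be handled by SVQ: device-specific
 * bits must be in vdpa_svq_device_features, while transport bits are
 * validated by vhost_svq_valid_features().
 */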
155 static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
156 {
157     uint64_t invalid_dev_features =
158         features & ~vdpa_svq_device_features &
159         /* Transport features are all accepted at this point */
160         ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
161                          VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
162 
163     if (invalid_dev_features) {
164         error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
165                    invalid_dev_features);
166         return false;
167     }
168 
169     return vhost_svq_valid_features(features, errp);
170 }
171 
172 static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
173 {
174     uint32_t device_id;
175     int ret;
176     struct vhost_dev *hdev;
177 
178     hdev = (struct vhost_dev *)&net->dev;
179     ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
180     if (device_id != VIRTIO_ID_NET) {
181         return -ENOTSUP;
182     }
183     return ret;
184 }
185 
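/**
 * Create the vhost_net backend for the queues of this client and verify
 * that the underlying vdpa device is a network device.
 *
 * Returns 0 on success, -1 on error.
 */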
186 static int vhost_vdpa_add(NetClientState *ncs, void *be,
187                           int queue_pair_index, int nvqs)
188 {
189     VhostNetOptions options;
190     struct vhost_net *net = NULL;
191     VhostVDPAState *s;
192     int ret;
193 
194     options.backend_type = VHOST_BACKEND_TYPE_VDPA;
195     assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
196     s = DO_UPCAST(VhostVDPAState, nc, ncs);
197     options.net_backend = ncs;
198     options.opaque      = be;
199     options.busyloop_timeout = 0;
200     options.nvqs = nvqs;
201 
202     net = vhost_net_init(&options);
203     if (!net) {
204         error_report("failed to init vhost_net for queue");
205         goto err_init;
206     }
207     s->vhost_net = net;
208     ret = vhost_vdpa_net_check_device_id(net);
209     if (ret) {
210         goto err_check;
211     }
212     return 0;
213 err_check:
214     vhost_net_cleanup(net);
215     g_free(net);
216 err_init:
217     return -1;
218 }
219 
220 static void vhost_vdpa_cleanup(NetClientState *nc)
221 {
222     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
223 
224     /*
225      * If a peer NIC is attached, do not clean up anything.
226      * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
227      * when the guest is shutting down.
228      */
229     if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
230         return;
231     }
232     munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
233     munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
234     if (s->vhost_net) {
235         vhost_net_cleanup(s->vhost_net);
236         g_free(s->vhost_net);
237         s->vhost_net = NULL;
238     }
239     if (s->vhost_vdpa.index != 0) {
240         return;
241     }
242     qemu_close(s->vhost_vdpa.shared->device_fd);
243     g_free(s->vhost_vdpa.shared);
244 }
245 
246 /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend  */
247 static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
248 {
249     return true;
250 }
251 
252 static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
253 {
254     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
255 
256     return true;
257 }
258 
259 static bool vhost_vdpa_has_ufo(NetClientState *nc)
260 {
261     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
262     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
263     uint64_t features = 0;
264     features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
265     features = vhost_net_get_features(s->vhost_net, features);
266     return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
267 
268 }
269 
270 static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
271                                        Error **errp)
272 {
273     const char *driver = object_class_get_name(oc);
274 
275     if (!g_str_has_prefix(driver, "virtio-net-")) {
276         error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
277         return false;
278     }
279 
280     return true;
281 }
282 
283 /** Dummy receive in case qemu falls back to userland tap networking */
284 static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
285                                   size_t size)
286 {
287     return size;
288 }
289 
290 static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
291 {
292     struct vhost_vdpa *v = &s->vhost_vdpa;
293     VirtIONet *n;
294     VirtIODevice *vdev;
295     int data_queue_pairs, cvq, r;
296 
297     /* We are only called on the first data vqs and only if x-svq is not set */
298     if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
299         return;
300     }
301 
302     vdev = v->dev->vdev;
303     n = VIRTIO_NET(vdev);
304     if (!n->vhost_started) {
305         return;
306     }
307 
308     data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
309     cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
310                                   n->max_ncs - n->max_queue_pairs : 0;
311     /*
312      * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
313      * in the future and resume the device if read-only operations between
314      * suspend and reset go wrong.
315      */
316     vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
317 
318     /* Start will check migration setup_or_active to decide whether to configure SVQ */
319     r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
320     if (unlikely(r < 0)) {
321         error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
322     }
323 }
324 
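/**
 * Migration state notifier: restart the data virtqueues in shadow mode when
 * migration setup begins, so dirty memory logging can be emulated, and switch
 * them back to passthrough if the migration fails.
 */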
325 static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
326 {
327     MigrationState *migration = data;
328     VhostVDPAState *s = container_of(notifier, VhostVDPAState,
329                                      migration_state);
330 
331     if (migration_in_setup(migration)) {
332         vhost_vdpa_net_log_global_enable(s, true);
333     } else if (migration_has_failed(migration)) {
334         vhost_vdpa_net_log_global_enable(s, false);
335     }
336 }
337 
338 static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
339 {
340     struct vhost_vdpa *v = &s->vhost_vdpa;
341 
342     migration_add_notifier(&s->migration_state,
343                            vdpa_net_migration_state_notifier);
344     if (v->shadow_vqs_enabled) {
345         v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
346                                                    v->shared->iova_range.last);
347     }
348 }
349 
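/**
 * Start callback for the data virtqueues: shadow them if x-svq is set or a
 * migration is in setup/active state, and let the first queue propagate that
 * decision to the shared state and perform the one-time setup.
 */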
350 static int vhost_vdpa_net_data_start(NetClientState *nc)
351 {
352     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
353     struct vhost_vdpa *v = &s->vhost_vdpa;
354 
355     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
356 
357     if (s->always_svq ||
358         migration_is_setup_or_active(migrate_get_current()->state)) {
359         v->shadow_vqs_enabled = true;
360     } else {
361         v->shadow_vqs_enabled = false;
362     }
363 
364     if (v->index == 0) {
365         v->shared->shadow_data = v->shadow_vqs_enabled;
366         vhost_vdpa_net_data_start_first(s);
367         return 0;
368     }
369 
370     return 0;
371 }
372 
373 static int vhost_vdpa_net_data_load(NetClientState *nc)
374 {
375     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
376     struct vhost_vdpa *v = &s->vhost_vdpa;
377     bool has_cvq = v->dev->vq_index_end % 2;
378 
379     if (has_cvq) {
380         return 0;
381     }
382 
383     for (int i = 0; i < v->dev->nvqs; ++i) {
384         vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
385     }
386     return 0;
387 }
388 
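/**
 * Per-queue stop callback: the first queue removes the migration state
 * notifier, and the last queue of the device releases the shared IOVA tree.
 */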
389 static void vhost_vdpa_net_client_stop(NetClientState *nc)
390 {
391     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
392     struct vhost_dev *dev;
393 
394     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
395 
396     if (s->vhost_vdpa.index == 0) {
397         migration_remove_notifier(&s->migration_state);
398     }
399 
400     dev = s->vhost_vdpa.dev;
401     if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
402         g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
403                         vhost_iova_tree_delete);
404     }
405 }
406 
407 static NetClientInfo net_vhost_vdpa_info = {
408         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
409         .size = sizeof(VhostVDPAState),
410         .receive = vhost_vdpa_receive,
411         .start = vhost_vdpa_net_data_start,
412         .load = vhost_vdpa_net_data_load,
413         .stop = vhost_vdpa_net_client_stop,
414         .cleanup = vhost_vdpa_cleanup,
415         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
416         .has_ufo = vhost_vdpa_has_ufo,
417         .check_peer_type = vhost_vdpa_check_peer_type,
418         .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
419 };
420 
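/**
 * Return the group that virtqueue @vq_index belongs to, as reported by the
 * VHOST_VDPA_GET_VRING_GROUP ioctl, or a negative errno on failure.
 */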
421 static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
422                                           Error **errp)
423 {
424     struct vhost_vring_state state = {
425         .index = vq_index,
426     };
427     int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
428 
429     if (unlikely(r < 0)) {
430         r = -errno;
431         error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
432         return r;
433     }
434 
435     return state.num;
436 }
437 
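/**
 * Bind virtqueue group @vq_group to address space @asid_num using the
 * VHOST_VDPA_SET_GROUP_ASID ioctl.
 */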
438 static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
439                                            unsigned vq_group,
440                                            unsigned asid_num)
441 {
442     struct vhost_vring_state asid = {
443         .index = vq_group,
444         .num = asid_num,
445     };
446     int r;
447 
448     r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
449     if (unlikely(r < 0)) {
450         error_report("Can't set vq group %u asid %u, errno=%d (%s)",
451                      asid.index, asid.num, errno, g_strerror(errno));
452     }
453     return r;
454 }
455 
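/** Unmap a CVQ shadow buffer from the device and remove it from the IOVA tree */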
456 static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
457 {
458     VhostIOVATree *tree = v->shared->iova_tree;
459     DMAMap needle = {
460         /*
461          * No need to specify size or to look for more translations since
462          * this contiguous chunk was allocated by us.
463          */
464         .translated_addr = (hwaddr)(uintptr_t)addr,
465     };
466     const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
467     int r;
468 
469     if (unlikely(!map)) {
470         error_report("Cannot locate expected map");
471         return;
472     }
473 
474     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
475                              map->size + 1);
476     if (unlikely(r != 0)) {
477         error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
478     }
479 
480     vhost_iova_tree_remove(tree, *map);
481 }
482 
483 /** Map CVQ buffer. */
484 static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
485                                   bool write)
486 {
487     DMAMap map = {};
488     int r;
489 
490     map.translated_addr = (hwaddr)(uintptr_t)buf;
491     map.size = size - 1;
492     map.perm = write ? IOMMU_RW : IOMMU_RO;
493     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
494     if (unlikely(r != IOVA_OK)) {
495         error_report("Cannot map injected element");
496         return r;
497     }
498 
499     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
500                            vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
501     if (unlikely(r < 0)) {
502         goto dma_map_err;
503     }
504 
505     return 0;
506 
507 dma_map_err:
508     vhost_iova_tree_remove(v->shared->iova_tree, map);
509     return r;
510 }
511 
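/**
 * Start the control virtqueue client.
 *
 * If the data virtqueues are not shadowed, CVQ is shadowed on its own only
 * when the device features can be handled by SVQ and the device can isolate
 * CVQ in its own group, which is then moved to VHOST_VDPA_NET_CVQ_ASID.
 * When shadowing, the command and status shadow buffers are mapped into the
 * device.
 */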
512 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
513 {
514     VhostVDPAState *s;
515     struct vhost_vdpa *v;
516     int64_t cvq_group;
517     int r;
518     Error *err = NULL;
519 
520     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
521 
522     s = DO_UPCAST(VhostVDPAState, nc, nc);
523     v = &s->vhost_vdpa;
524 
525     v->shadow_vqs_enabled = v->shared->shadow_data;
526     s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
527 
528     if (v->shared->shadow_data) {
529         /* SVQ is already configured for all virtqueues */
530         goto out;
531     }
532 
533     /*
534      * If we return early in these cases, SVQ will not be enabled. Migration
535      * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
536      */
537     if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
538         return 0;
539     }
540 
541     if (!s->cvq_isolated) {
542         return 0;
543     }
544 
545     cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
546                                            v->dev->vq_index_end - 1,
547                                            &err);
548     if (unlikely(cvq_group < 0)) {
549         error_report_err(err);
550         return cvq_group;
551     }
552 
553     r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
554     if (unlikely(r < 0)) {
555         return r;
556     }
557 
558     v->shadow_vqs_enabled = true;
559     s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
560 
561 out:
562     if (!s->vhost_vdpa.shadow_vqs_enabled) {
563         return 0;
564     }
565 
566     /*
567      * If another vhost_vdpa device already has an iova_tree, reuse it for
568      * simplicity, whether CVQ shares the ASID with the guest or not, because:
569      * - The memory listener needs access to the guest's memory addresses
570      *   allocated in the IOVA tree.
571      * - There should be plenty of IOVA address space for both ASIDs, so there
572      *   is no need to worry about collisions between them.  The guest's
573      *   translations are still validated with virtio virtqueue_pop, so there
574      *   is no risk of the guest accessing memory that it shouldn't.
575      *
576      * Allocating an iova tree per ASID is doable, but it complicates the code
577      * and is not worth it for the moment.
578      */
579     if (!v->shared->iova_tree) {
580         v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
581                                                    v->shared->iova_range.last);
582     }
583 
584     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
585                                vhost_vdpa_net_cvq_cmd_page_len(), false);
586     if (unlikely(r < 0)) {
587         return r;
588     }
589 
590     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
591                                vhost_vdpa_net_cvq_cmd_page_len(), true);
592     if (unlikely(r < 0)) {
593         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
594     }
595 
596     return r;
597 }
598 
599 static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
600 {
601     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
602 
603     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
604 
605     if (s->vhost_vdpa.shadow_vqs_enabled) {
606         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
607         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
608     }
609 
610     vhost_vdpa_net_client_stop(nc);
611 }
612 
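/**
 * Add a control command to the shadow CVQ without polling for completion.
 *
 * Returns 0 on success or a negative error; -ENOSPC (no room left on the
 * device queue) is additionally logged as a guest error.
 */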
613 static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
614                                     const struct iovec *out_sg, size_t out_num,
615                                     const struct iovec *in_sg, size_t in_num)
616 {
617     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
618     int r;
619 
620     r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
621     if (unlikely(r != 0)) {
622         if (unlikely(r == -ENOSPC)) {
623             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
624                           __func__);
625         }
626     }
627 
628     return r;
629 }
630 
631 /*
632  * Convenience wrapper to poll SVQ for multiple control commands.
633  *
634  * The caller should hold the BQL when invoking this function, and should
635  * consume the answer before SVQ polls by itself once the BQL is released.
636  */
637 static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
638 {
639     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
640     return vhost_svq_poll(svq, cmds_in_flight);
641 }
642 
643 static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
644                                              struct iovec *out_cursor,
645                                              struct iovec *in_cursor)
646 {
647     /* reset the cursor of the output buffer for the device */
648     out_cursor->iov_base = s->cvq_cmd_out_buffer;
649     out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
650 
651     /* reset the cursor of the in buffer for the device */
652     in_cursor->iov_base = s->status;
653     in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
654 }
655 
656 /*
657  * Poll SVQ for multiple pending control commands and check the device's ack.
658  *
659  * Caller should hold the BQL when invoking this function.
660  *
661  * @s: The VhostVDPAState
662  * @len: The length of the pending status shadow buffer
663  */
664 static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
665 {
666     /* the device writes a one-byte ack for each control command */
667     ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
668     if (unlikely(dev_written != len)) {
669         return -EIO;
670     }
671 
672     /* check the device's ack */
673     for (int i = 0; i < len; ++i) {
674         if (s->status[i] != VIRTIO_NET_OK) {
675             return -EIO;
676         }
677     }
678     return 0;
679 }
680 
681 static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
682                                        struct iovec *out_cursor,
683                                        struct iovec *in_cursor, uint8_t class,
684                                        uint8_t cmd, const struct iovec *data_sg,
685                                        size_t data_num)
686 {
687     const struct virtio_net_ctrl_hdr ctrl = {
688         .class = class,
689         .cmd = cmd,
690     };
691     size_t data_size = iov_size(data_sg, data_num), cmd_size;
692     struct iovec out, in;
693     ssize_t r;
694     unsigned dummy_cursor_iov_cnt;
695     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
696 
697     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
698     cmd_size = sizeof(ctrl) + data_size;
699     if (vhost_svq_available_slots(svq) < 2 ||
700         iov_size(out_cursor, 1) < cmd_size) {
701         /*
702          * It is time to flush all pending control commands if SVQ is full
703          * or the control command shadow buffers are full.
704          *
705          * We can poll here since we have held the BQL from the time
706          * we sent the descriptor.
707          */
708         r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
709                                      (void *)s->status);
710         if (unlikely(r < 0)) {
711             return r;
712         }
713 
714         vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
715     }
716 
717     /* pack the CVQ command header */
718     iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
719     /* pack the CVQ command's command-specific data */
720     iov_to_buf(data_sg, data_num, 0,
721                out_cursor->iov_base + sizeof(ctrl), data_size);
722 
723     /* extract the required buffer from the cursor for output */
724     iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
725     /* extract the required buffer from the cursor for input */
726     iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
727 
728     r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
729     if (unlikely(r < 0)) {
730         return r;
731     }
732 
733     /* iterate the cursors */
734     dummy_cursor_iov_cnt = 1;
735     iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
736     dummy_cursor_iov_cnt = 1;
737     iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
738 
739     return 0;
740 }
741 
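/**
 * Load the MAC address and, when the filter table is not empty, the MAC
 * filter table into the device through CVQ so it matches the driver state.
 */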
742 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
743                                    struct iovec *out_cursor,
744                                    struct iovec *in_cursor)
745 {
746     if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
747         const struct iovec data = {
748             .iov_base = (void *)n->mac,
749             .iov_len = sizeof(n->mac),
750         };
751         ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
752                                             VIRTIO_NET_CTRL_MAC,
753                                             VIRTIO_NET_CTRL_MAC_ADDR_SET,
754                                             &data, 1);
755         if (unlikely(r < 0)) {
756             return r;
757         }
758     }
759 
760     /*
761      * According to the VirtIO standard, "The device MUST have an
762      * empty MAC filtering table on reset.".
763      *
764      * Therefore, there is no need to send this CVQ command if the
765      * driver also sets an empty MAC filter table, which aligns with
766      * the device's defaults.
767      *
768      * Note that the device's defaults can mismatch the driver's
769      * configuration only at live migration.
770      */
771     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
772         n->mac_table.in_use == 0) {
773         return 0;
774     }
775 
776     uint32_t uni_entries = n->mac_table.first_multi,
777              uni_macs_size = uni_entries * ETH_ALEN,
778              mul_entries = n->mac_table.in_use - uni_entries,
779              mul_macs_size = mul_entries * ETH_ALEN;
780     struct virtio_net_ctrl_mac uni = {
781         .entries = cpu_to_le32(uni_entries),
782     };
783     struct virtio_net_ctrl_mac mul = {
784         .entries = cpu_to_le32(mul_entries),
785     };
786     const struct iovec data[] = {
787         {
788             .iov_base = &uni,
789             .iov_len = sizeof(uni),
790         }, {
791             .iov_base = n->mac_table.macs,
792             .iov_len = uni_macs_size,
793         }, {
794             .iov_base = &mul,
795             .iov_len = sizeof(mul),
796         }, {
797             .iov_base = &n->mac_table.macs[uni_macs_size],
798             .iov_len = mul_macs_size,
799         },
800     };
801     ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
802                                         VIRTIO_NET_CTRL_MAC,
803                                         VIRTIO_NET_CTRL_MAC_TABLE_SET,
804                                         data, ARRAY_SIZE(data));
805     if (unlikely(r < 0)) {
806         return r;
807     }
808 
809     return 0;
810 }
811 
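/**
 * Load the RSS or hash-report configuration into the device; @do_rss selects
 * VIRTIO_NET_CTRL_MQ_RSS_CONFIG, otherwise VIRTIO_NET_CTRL_MQ_HASH_CONFIG is
 * used.
 */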
812 static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
813                                    struct iovec *out_cursor,
814                                    struct iovec *in_cursor, bool do_rss)
815 {
816     struct virtio_net_rss_config cfg = {};
817     ssize_t r;
818     g_autofree uint16_t *table = NULL;
819 
820     /*
821      * According to the VirtIO standard, "Initially the device has all hash
822      * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
823      *
824      * Therefore, there is no need to send this CVQ command if the
825      * driver disables all hash types, which aligns with
826      * the device's defaults.
827      *
828      * Note that the device's defaults can mismatch the driver's
829      * configuration only at live migration.
830      */
831     if (!n->rss_data.enabled ||
832         n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
833         return 0;
834     }
835 
836     table = g_malloc_n(n->rss_data.indirections_len,
837                        sizeof(n->rss_data.indirections_table[0]));
838     cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);
839 
840     if (do_rss) {
841         /*
842          * According to the VirtIO standard, "Number of entries in indirection_table
843          * is (indirection_table_mask + 1)".
844          */
845         cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
846                                                  1);
847         cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
848         for (int i = 0; i < n->rss_data.indirections_len; ++i) {
849             table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
850         }
851         cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
852     } else {
853         /*
854          * According to the VirtIO standard, "Field reserved MUST contain zeroes.
855          * It is defined to make the structure to match the layout of
856          * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
857          *
858          * Therefore, we need to zero the fields in
859          * struct virtio_net_rss_config, which correspond to the
860          * `reserved` field in struct virtio_net_hash_config.
861          *
862          * Note that all other fields are zeroed at their definitions,
863          * except for the `indirection_table` field, where the actual data
864          * is stored in the `table` variable to ensure compatibility
865          * with the RSS case. Therefore, we need to zero the `table` variable here.
866          */
867         table[0] = 0;
868     }
869 
870     /*
871      * virtio_net_handle_rss() currently does not restore the hash key length
872      * parsed from the guest's CVQ command into n->rss_data, and it uses the
873      * maximum key length in other code, so we also employ the maximum key
874      * length here.
875      */
876     cfg.hash_key_length = sizeof(n->rss_data.key);
877 
878     const struct iovec data[] = {
879         {
880             .iov_base = &cfg,
881             .iov_len = offsetof(struct virtio_net_rss_config,
882                                 indirection_table),
883         }, {
884             .iov_base = table,
885             .iov_len = n->rss_data.indirections_len *
886                        sizeof(n->rss_data.indirections_table[0]),
887         }, {
888             .iov_base = &cfg.max_tx_vq,
889             .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
890                        offsetof(struct virtio_net_rss_config, max_tx_vq),
891         }, {
892             .iov_base = (void *)n->rss_data.key,
893             .iov_len = sizeof(n->rss_data.key),
894         }
895     };
896 
897     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
898                                 VIRTIO_NET_CTRL_MQ,
899                                 do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
900                                 VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
901                                 data, ARRAY_SIZE(data));
902     if (unlikely(r < 0)) {
903         return r;
904     }
905 
906     return 0;
907 }
908 
909 static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
910                                   const VirtIONet *n,
911                                   struct iovec *out_cursor,
912                                   struct iovec *in_cursor)
913 {
914     struct virtio_net_ctrl_mq mq;
915     ssize_t r;
916 
917     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
918         return 0;
919     }
920 
921     mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
922     const struct iovec data = {
923         .iov_base = &mq,
924         .iov_len = sizeof(mq),
925     };
926     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
927                                 VIRTIO_NET_CTRL_MQ,
928                                 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
929                                 &data, 1);
930     if (unlikely(r < 0)) {
931         return r;
932     }
933 
934     if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
935         /* load the receive-side scaling state */
936         r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
937         if (unlikely(r < 0)) {
938             return r;
939         }
940     } else if (virtio_vdev_has_feature(&n->parent_obj,
941                                        VIRTIO_NET_F_HASH_REPORT)) {
942         /* load the hash calculation state */
943         r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
944         if (unlikely(r < 0)) {
945             return r;
946         }
947     }
948 
949     return 0;
950 }
951 
952 static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
953                                         const VirtIONet *n,
954                                         struct iovec *out_cursor,
955                                         struct iovec *in_cursor)
956 {
957     uint64_t offloads;
958     ssize_t r;
959 
960     if (!virtio_vdev_has_feature(&n->parent_obj,
961                                  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
962         return 0;
963     }
964 
965     if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
966         /*
967          * According to VirtIO standard, "Upon feature negotiation
968          * corresponding offload gets enabled to preserve
969          * backward compatibility.".
970          *
971          * Therefore, there is no need to send this CVQ command if the
972          * driver also enables all supported offloads, which aligns with
973          * the device's defaults.
974          *
975          * Note that the device's defaults can mismatch the driver's
976          * configuration only at live migration.
977          */
978         return 0;
979     }
980 
981     offloads = cpu_to_le64(n->curr_guest_offloads);
982     const struct iovec data = {
983         .iov_base = &offloads,
984         .iov_len = sizeof(offloads),
985     };
986     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
987                                 VIRTIO_NET_CTRL_GUEST_OFFLOADS,
988                                 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
989                                 &data, 1);
990     if (unlikely(r < 0)) {
991         return r;
992     }
993 
994     return 0;
995 }
996 
997 static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
998                                        struct iovec *out_cursor,
999                                        struct iovec *in_cursor,
1000                                        uint8_t cmd,
1001                                        uint8_t on)
1002 {
1003     const struct iovec data = {
1004         .iov_base = &on,
1005         .iov_len = sizeof(on),
1006     };
1007     ssize_t r;
1008 
1009     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
1010                                 VIRTIO_NET_CTRL_RX, cmd, &data, 1);
1011     if (unlikely(r < 0)) {
1012         return r;
1013     }
1014 
1015     return 0;
1016 }
1017 
1018 static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
1019                                   const VirtIONet *n,
1020                                   struct iovec *out_cursor,
1021                                   struct iovec *in_cursor)
1022 {
1023     ssize_t r;
1024 
1025     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
1026         return 0;
1027     }
1028 
1029     /*
1030      * According to virtio_net_reset(), device turns promiscuous mode
1031      * on by default.
1032      *
1033      * Additionally, according to the VirtIO standard, "Since there are
1034      * no guarantees, it can use a hash filter or silently switch to
1035      * allmulti or promiscuous mode if it is given too many addresses.".
1036      * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
1037      * non-multicast MAC addresses, indicating that promiscuous mode
1038      * should be enabled.
1039      *
1040      * Therefore, QEMU should only send this CVQ command if
1041      * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
1042      * i.e. the driver wants promiscuous mode off, which differs from the device's default.
1043      *
1044      * Note that the device's defaults can mismatch the driver's
1045      * configuration only at live migration.
1046      */
1047     if (!n->mac_table.uni_overflow && !n->promisc) {
1048         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1049                                         VIRTIO_NET_CTRL_RX_PROMISC, 0);
1050         if (unlikely(r < 0)) {
1051             return r;
1052         }
1053     }
1054 
1055     /*
1056      * According to virtio_net_reset(), device turns all-multicast mode
1057      * off by default.
1058      *
1059      * According to the VirtIO standard, "Since there are no guarantees,
1060      * it can use a hash filter or silently switch to allmulti or
1061      * promiscuous mode if it is given too many addresses.". QEMU marks
1062      * `n->mac_table.multi_overflow` if the guest sets too many
1063      * multicast MAC addresses.
1064      *
1065      * Therefore, QEMU should only send this CVQ command if the
1066      * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
1067      * which sets all-multicast mode on, different from the device's defaults.
1068      *
1069      * Note that the device's defaults can mismatch the driver's
1070      * configuration only at live migration.
1071      */
1072     if (n->mac_table.multi_overflow || n->allmulti) {
1073         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1074                                         VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
1075         if (unlikely(r < 0)) {
1076             return r;
1077         }
1078     }
1079 
1080     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
1081         return 0;
1082     }
1083 
1084     /*
1085      * According to virtio_net_reset(), device turns all-unicast mode
1086      * off by default.
1087      *
1088      * Therefore, QEMU should only send this CVQ command if the driver
1089      * sets all-unicast mode on, different from the device's defaults.
1090      *
1091      * Note that the device's defaults can mismatch the driver's
1092      * configuration only at live migration.
1093      */
1094     if (n->alluni) {
1095         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1096                                         VIRTIO_NET_CTRL_RX_ALLUNI, 1);
1097         if (r < 0) {
1098             return r;
1099         }
1100     }
1101 
1102     /*
1103      * According to virtio_net_reset(), device turns non-multicast mode
1104      * off by default.
1105      *
1106      * Therefore, QEMU should only send this CVQ command if the driver
1107      * sets non-multicast mode on, different from the device's defaults.
1108      *
1109      * Note that the device's defaults can mismatch the driver's
1110      * configuration only at live migration.
1111      */
1112     if (n->nomulti) {
1113         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1114                                         VIRTIO_NET_CTRL_RX_NOMULTI, 1);
1115         if (r < 0) {
1116             return r;
1117         }
1118     }
1119 
1120     /*
1121      * According to virtio_net_reset(), device turns non-unicast mode
1122      * off by default.
1123      *
1124      * Therefore, QEMU should only send this CVQ command if the driver
1125      * sets non-unicast mode on, different from the device's defaults.
1126      *
1127      * Note that the device's defaults can mismatch the driver's
1128      * configuration only at live migration.
1129      */
1130     if (n->nouni) {
1131         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1132                                         VIRTIO_NET_CTRL_RX_NOUNI, 1);
1133         if (r < 0) {
1134             return r;
1135         }
1136     }
1137 
1138     /*
1139      * According to virtio_net_reset(), device turns non-broadcast mode
1140      * off by default.
1141      *
1142      * Therefore, QEMU should only send this CVQ command if the driver
1143      * sets non-broadcast mode on, different from the device's defaults.
1144      *
1145      * Note that the device's defaults can mismatch the driver's
1146      * configuration only at live migration.
1147      */
1148     if (n->nobcast) {
1149         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1150                                         VIRTIO_NET_CTRL_RX_NOBCAST, 1);
1151         if (r < 0) {
1152             return r;
1153         }
1154     }
1155 
1156     return 0;
1157 }
1158 
1159 static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
1160                                            const VirtIONet *n,
1161                                            struct iovec *out_cursor,
1162                                            struct iovec *in_cursor,
1163                                            uint16_t vid)
1164 {
1165     const struct iovec data = {
1166         .iov_base = &vid,
1167         .iov_len = sizeof(vid),
1168     };
1169     ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
1170                                         VIRTIO_NET_CTRL_VLAN,
1171                                         VIRTIO_NET_CTRL_VLAN_ADD,
1172                                         &data, 1);
1173     if (unlikely(r < 0)) {
1174         return r;
1175     }
1176 
1177     return 0;
1178 }
1179 
1180 static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
1181                                     const VirtIONet *n,
1182                                     struct iovec *out_cursor,
1183                                     struct iovec *in_cursor)
1184 {
1185     int r;
1186 
1187     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
1188         return 0;
1189     }
1190 
1191     for (int i = 0; i < MAX_VLAN >> 5; i++) {
1192         for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
1193             if (n->vlans[i] & (1U << j)) {
1194                 r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
1195                                                     in_cursor, (i << 5) + j);
1196                 if (unlikely(r != 0)) {
1197                     return r;
1198                 }
1199             }
1200         }
1201     }
1202 
1203     return 0;
1204 }
1205 
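/**
 * Load callback for CVQ: enable the control vring, replay the virtio-net
 * control state (MAC, MQ, offloads, RX mode, VLAN) through SVQ when
 * shadowing, flush and check the pending acks, then enable the data vrings.
 */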
1206 static int vhost_vdpa_net_cvq_load(NetClientState *nc)
1207 {
1208     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
1209     struct vhost_vdpa *v = &s->vhost_vdpa;
1210     const VirtIONet *n;
1211     int r;
1212     struct iovec out_cursor, in_cursor;
1213 
1214     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1215 
1216     vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
1217 
1218     if (v->shadow_vqs_enabled) {
1219         n = VIRTIO_NET(v->dev->vdev);
1220         vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
1221         r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
1222         if (unlikely(r < 0)) {
1223             return r;
1224         }
1225         r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
1226         if (unlikely(r)) {
1227             return r;
1228         }
1229         r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
1230         if (unlikely(r)) {
1231             return r;
1232         }
1233         r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
1234         if (unlikely(r)) {
1235             return r;
1236         }
1237         r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
1238         if (unlikely(r)) {
1239             return r;
1240         }
1241 
1242         /*
1243          * We need to poll and check all of the device's pending used buffers.
1244          *
1245          * We can poll here since we have held the BQL from the time
1246          * we sent the descriptor.
1247          */
1248         r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
1249         if (unlikely(r)) {
1250             return r;
1251         }
1252     }
1253 
1254     for (int i = 0; i < v->dev->vq_index; ++i) {
1255         vhost_vdpa_set_vring_ready(v, i);
1256     }
1257 
1258     return 0;
1259 }
1260 
1261 static NetClientInfo net_vhost_vdpa_cvq_info = {
1262     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
1263     .size = sizeof(VhostVDPAState),
1264     .receive = vhost_vdpa_receive,
1265     .start = vhost_vdpa_net_cvq_start,
1266     .load = vhost_vdpa_net_cvq_load,
1267     .stop = vhost_vdpa_net_cvq_stop,
1268     .cleanup = vhost_vdpa_cleanup,
1269     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
1270     .has_ufo = vhost_vdpa_has_ufo,
1271     .check_peer_type = vhost_vdpa_check_peer_type,
1272     .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
1273 };
1274 
1275 /*
1276  * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
1277  * vdpa device.
1278  *
1279  * Considering that QEMU cannot send the entire filter table to the
1280  * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
1281  * command to enable promiscuous mode to receive all packets,
1282  * according to the VirtIO standard, "Since there are no guarantees,
1283  * it can use a hash filter or silently switch to allmulti or
1284  * promiscuous mode if it is given too many addresses.".
1285  *
1286  * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
1287  * marks `n->mac_table.x_overflow` accordingly, providing the device model
1288  * with (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses
1289  * has the same effect.
1290  * The same applies to multicast MAC addresses.
1291  *
1292  * Therefore, QEMU can provide the device model with a fake
1293  * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
1294  * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1295  * MAC addresses. This ensures that the device model marks
1296  * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1297  * allowing all packets to be received, which aligns with the
1298  * state of the vdpa device.
1299  */
1300 static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
1301                                                        VirtQueueElement *elem,
1302                                                        struct iovec *out,
1303                                                        const struct iovec *in)
1304 {
1305     struct virtio_net_ctrl_mac mac_data, *mac_ptr;
1306     struct virtio_net_ctrl_hdr *hdr_ptr;
1307     uint32_t cursor;
1308     ssize_t r;
1309     uint8_t on = 1;
1310 
1311     /* parse the non-multicast MAC address entries from CVQ command */
1312     cursor = sizeof(*hdr_ptr);
1313     r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1314                    &mac_data, sizeof(mac_data));
1315     if (unlikely(r != sizeof(mac_data))) {
1316         /*
1317          * If the CVQ command is invalid, we should simulate the vdpa device
1318          * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1319          */
1320         *s->status = VIRTIO_NET_ERR;
1321         return sizeof(*s->status);
1322     }
1323     cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1324 
1325     /* parse the multicast MAC address entries from CVQ command */
1326     r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1327                    &mac_data, sizeof(mac_data));
1328     if (r != sizeof(mac_data)) {
1329         /*
1330          * If the CVQ command is invalid, we should simulate the vdpa device
1331          * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1332          */
1333         *s->status = VIRTIO_NET_ERR;
1334         return sizeof(*s->status);
1335     }
1336     cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1337 
1338     /* validate the CVQ command */
1339     if (iov_size(elem->out_sg, elem->out_num) != cursor) {
1340         /*
1341          * If the CVQ command is invalid, we should simulate the vdpa device
1342          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1343          */
1344         *s->status = VIRTIO_NET_ERR;
1345         return sizeof(*s->status);
1346     }
1347 
1348     /*
1349      * According to the VirtIO standard, "Since there are no guarantees,
1350      * it can use a hash filter or silently switch to allmulti or
1351      * promiscuous mode if it is given too many addresses.".
1352      *
1353      * Therefore, considering that QEMU is unable to send the entire
1354      * filter table to the vdpa device, it should send the
1355      * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
1356      */
1357     hdr_ptr = out->iov_base;
1358     out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
1359 
1360     hdr_ptr->class = VIRTIO_NET_CTRL_RX;
1361     hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
1362     iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
1363     r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
1364     if (unlikely(r < 0)) {
1365         return r;
1366     }
1367 
1368     /*
1369      * We can poll here since we have held the BQL from the time
1370      * we sent the descriptor.
1371      */
1372     r = vhost_vdpa_net_svq_poll(s, 1);
1373     if (unlikely(r < sizeof(*s->status))) {
1374         return r;
1375     }
1376     if (*s->status != VIRTIO_NET_OK) {
1377         return sizeof(*s->status);
1378     }
1379 
1380     /*
1381      * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
1382      * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
1383      * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
1384      * multicast MAC addresses.
1385      *
1386      * By doing so, the device model can mark `n->mac_table.uni_overflow`
1387      * and `n->mac_table.multi_overflow`, enabling all packets to be
1388      * received, which aligns with the state of the vdpa device.
1389      */
1390     cursor = 0;
1391     uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
1392              fake_mul_entries = MAC_TABLE_ENTRIES + 1,
1393              fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
1394                              sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
1395                              sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
1396 
1397     assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
1398     out->iov_len = fake_cvq_size;
1399 
1400     /* pack the header for fake CVQ command */
1401     hdr_ptr = out->iov_base + cursor;
1402     hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
1403     hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1404     cursor += sizeof(*hdr_ptr);
1405 
1406     /*
1407      * Pack the non-multicast MAC addresses part for fake CVQ command.
1408      *
1409      * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1410      * addresses provided in the CVQ command. Therefore, only the entries
1411      * field needs to be prepared in the CVQ command.
1412      */
1413     mac_ptr = out->iov_base + cursor;
1414     mac_ptr->entries = cpu_to_le32(fake_uni_entries);
1415     cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
1416 
1417     /*
1418      * Pack the multicast MAC addresses part for fake CVQ command.
1419      *
1420      * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1421      * addresses provided in the CVQ command. Therefore, only the entries
1422      * field needs to be prepared in the CVQ command.
1423      */
1424     mac_ptr = out->iov_base + cursor;
1425     mac_ptr->entries = cpu_to_le32(fake_mul_entries);
1426 
1427     /*
1428      * Simulate QEMU polling a vdpa device's used buffer
1429      * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
1430      */
1431     return sizeof(*s->status);
1432 }
1433 
1434 /**
1435  * Validate and copy control virtqueue commands.
1436  *
1437  * Following QEMU guidelines, we offer a copy of the buffers to the device to
1438  * prevent TOCTOU bugs.
1439  */
1440 static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1441                                             VirtQueueElement *elem,
1442                                             void *opaque)
1443 {
1444     VhostVDPAState *s = opaque;
1445     size_t in_len;
1446     const struct virtio_net_ctrl_hdr *ctrl;
1447     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1448     /* Out buffer sent to both the vdpa device and the device model */
1449     struct iovec out = {
1450         .iov_base = s->cvq_cmd_out_buffer,
1451     };
1452     /* in buffer used for device model */
1453     const struct iovec model_in = {
1454         .iov_base = &status,
1455         .iov_len = sizeof(status),
1456     };
1457     /* in buffer used for vdpa device */
1458     const struct iovec vdpa_in = {
1459         .iov_base = s->status,
1460         .iov_len = sizeof(*s->status),
1461     };
1462     ssize_t dev_written = -EINVAL;
1463 
1464     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1465                              s->cvq_cmd_out_buffer,
1466                              vhost_vdpa_net_cvq_cmd_page_len());
1467 
1468     ctrl = s->cvq_cmd_out_buffer;
1469     if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1470         /*
1471          * Guest announce capability is emulated by qemu, so don't forward to
1472          * the device.
1473          */
1474         dev_written = sizeof(status);
1475         *s->status = VIRTIO_NET_OK;
1476     } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1477                         ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1478                         iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1479         /*
1480          * Due to the size limitation of the out buffer sent to the vdpa device,
1481          * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1482          * MAC addresses set by the driver for the filter table can cause
1483          * truncation of the CVQ command in QEMU. As a result, the vdpa device
1484          * rejects the flawed CVQ command.
1485          *
1486          * Therefore, QEMU must handle this situation instead of sending
1487          * the CVQ command directly.
1488          */
1489         dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1490                                                             &out, &vdpa_in);
1491         if (unlikely(dev_written < 0)) {
1492             goto out;
1493         }
1494     } else {
1495         ssize_t r;
1496         r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
1497         if (unlikely(r < 0)) {
1498             dev_written = r;
1499             goto out;
1500         }
1501 
1502         /*
1503          * We can poll here since we have held the BQL from the time
1504          * we sent the descriptor.
1505          */
1506         dev_written = vhost_vdpa_net_svq_poll(s, 1);
1507     }
1508 
1509     if (unlikely(dev_written < sizeof(status))) {
1510         error_report("Insufficient written data (%zu)", dev_written);
1511         goto out;
1512     }
1513 
1514     if (*s->status != VIRTIO_NET_OK) {
1515         goto out;
1516     }
1517 
1518     status = VIRTIO_NET_ERR;
1519     virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
1520     if (status != VIRTIO_NET_OK) {
1521         error_report("Bad CVQ processing in model");
1522     }
1523 
1524 out:
1525     in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1526                           sizeof(status));
1527     if (unlikely(in_len < sizeof(status))) {
1528         error_report("Bad device CVQ written length");
1529     }
1530     vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
1531     /*
1532      * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
1533      * the function successfully forwards the CVQ command, indicated
1534      * by a non-negative value of `dev_written`. Otherwise, it still
1535      * belongs to SVQ.
1536      * This function should only free `elem` when it owns it.
1537      */
1538     if (dev_written >= 0) {
1539         g_free(elem);
1540     }
1541     return dev_written < 0 ? dev_written : 0;
1542 }
1543 
1544 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1545     .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1546 };
1547 
1548 /**
1549  * Probe if CVQ is isolated
1550  *
1551  * @device_fd         The vdpa device fd
1552  * @features          Features offered by the device
1553  * @cvq_index         The control virtqueue index
1554  *
1555  * Returns <0 on failure, 0 if the CVQ is not isolated and 1 if it is.
1556  */
1557 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1558                                           int cvq_index, Error **errp)
1559 {
1560     uint64_t backend_features;
1561     int64_t cvq_group;
1562     uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1563                      VIRTIO_CONFIG_S_DRIVER;
1564     int r;
1565 
1566     ERRP_GUARD();
1567 
1568     r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1569     if (unlikely(r < 0)) {
1570         error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1571         return r;
1572     }
1573 
1574     if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1575         return 0;
1576     }
1577 
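         /*
          * Walk the device through ACKNOWLEDGE/DRIVER, feature negotiation and
          * FEATURES_OK so that the virtqueue groups can be queried.
          */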
1578     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1579     if (unlikely(r)) {
1580         error_setg_errno(errp, -r, "Cannot set device status");
1581         goto out;
1582     }
1583 
1584     r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1585     if (unlikely(r)) {
1586         error_setg_errno(errp, -r, "Cannot set features");
1587         goto out;
1588     }
1589 
1590     status |= VIRTIO_CONFIG_S_FEATURES_OK;
1591     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1592     if (unlikely(r)) {
1593         error_setg_errno(errp, -r, "Cannot set device status");
1594         goto out;
1595     }
1596 
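         /* Ask the backend which virtqueue group the control virtqueue belongs to */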
1597     cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1598     if (unlikely(cvq_group < 0)) {
1599         if (cvq_group != -ENOTSUP) {
1600             r = cvq_group;
1601             goto out;
1602         }
1603 
1604         /*
1605          * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
1606          * supports ASID even if the parent driver does not.  The CVQ cannot be
1607          * isolated in this case.
1608          */
1609         error_free(*errp);
1610         *errp = NULL;
1611         r = 0;
1612         goto out;
1613     }
1614 
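         /* CVQ is isolated only if no data virtqueue shares its group */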
1615     for (int i = 0; i < cvq_index; ++i) {
1616         int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1617         if (unlikely(group < 0)) {
1618             r = group;
1619             goto out;
1620         }
1621 
1622         if (group == (int64_t)cvq_group) {
1623             r = 0;
1624             goto out;
1625         }
1626     }
1627 
1628     r = 1;
1629 
1630 out:
1631     status = 0;
1632     ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1633     return r;
1634 }
1635 
1636 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1637                                        const char *device,
1638                                        const char *name,
1639                                        int vdpa_device_fd,
1640                                        int queue_pair_index,
1641                                        int nvqs,
1642                                        bool is_datapath,
1643                                        bool svq,
1644                                        struct vhost_vdpa_iova_range iova_range,
1645                                        uint64_t features,
1646                                        VhostVDPAShared *shared,
1647                                        Error **errp)
1648 {
1649     NetClientState *nc = NULL;
1650     VhostVDPAState *s;
1651     int ret = 0;
1652     int cvq_isolated = 0;
1653     assert(name);
1654 
1655     if (is_datapath) {
1656         nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1657                                  name);
1658     } else {
1659         cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1660                                                       queue_pair_index * 2,
1661                                                       errp);
1662         if (unlikely(cvq_isolated < 0)) {
1663             return NULL;
1664         }
1665 
1666         nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1667                                          device, name);
1668     }
1669     qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1670     s = DO_UPCAST(VhostVDPAState, nc, nc);
1671 
1672     s->vhost_vdpa.index = queue_pair_index;
1673     s->always_svq = svq;
1674     s->migration_state.notify = NULL;
1675     s->vhost_vdpa.shadow_vqs_enabled = svq;
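         /*
          * The first queue pair allocates the VhostVDPAShared state that every
          * other queue pair reuses.
          */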
1676     if (queue_pair_index == 0) {
1677         vhost_vdpa_net_valid_svq_features(features,
1678                                           &s->vhost_vdpa.migration_blocker);
1679         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
1680         s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
1681         s->vhost_vdpa.shared->iova_range = iova_range;
1682         s->vhost_vdpa.shared->shadow_data = svq;
1683     } else if (!is_datapath) {
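             /*
              * Allocate the shadow CVQ command and status buffers as anonymous
              * pages so they can be DMA-mapped for the vdpa device.
              */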
1684         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1685                                      PROT_READ | PROT_WRITE,
1686                                      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1687         s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1688                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1689                          -1, 0);
1690 
1691         s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1692         s->vhost_vdpa.shadow_vq_ops_opaque = s;
1693         s->cvq_isolated = cvq_isolated;
1694     }
1695     if (queue_pair_index != 0) {
1696         s->vhost_vdpa.shared = shared;
1697     }
1698 
1699     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1700     if (ret) {
1701         qemu_del_net_client(nc);
1702         return NULL;
1703     }
1704 
1705     return nc;
1706 }
1707 
1708 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1709 {
1710     int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1711     if (unlikely(ret < 0)) {
1712         error_setg_errno(errp, errno,
1713                          "Failed to query features from vhost-vDPA device");
1714     }
1715     return ret;
1716 }
1717 
1718 static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1719                                           int *has_cvq, Error **errp)
1720 {
1721     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1722     g_autofree struct vhost_vdpa_config *config = NULL;
1723     __virtio16 *max_queue_pairs;
1724     int ret;
1725 
1726     if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
1727         *has_cvq = 1;
1728     } else {
1729         *has_cvq = 0;
1730     }
1731 
1732     if (features & (1 << VIRTIO_NET_F_MQ)) {
1733         config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1734         config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1735         config->len = sizeof(*max_queue_pairs);
1736 
1737         ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
1738         if (ret) {
1739             error_setg(errp, "Failed to get config from vhost-vDPA device");
1740             return ret; /* ioctl() returned -1; keep the error negative */
1741         }
1742 
1743         max_queue_pairs = (__virtio16 *)&config->buf;
1744 
1745         return lduw_le_p(max_queue_pairs);
1746     }
1747 
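         /* Without VIRTIO_NET_F_MQ the device provides a single queue pair */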
1748     return 1;
1749 }
1750 
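     /*
      * Entry point for "-netdev vhost-vdpa".  For example (device path and id
      * are illustrative):
      *
      *   -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
      *   -device virtio-net-pci,netdev=vdpa0
      *
      * Adding x-svq=on to the -netdev options forces the shadow virtqueue on
      * from the start.
      */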
1751 int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1752                         NetClientState *peer, Error **errp)
1753 {
1754     const NetdevVhostVDPAOptions *opts;
1755     uint64_t features;
1756     int vdpa_device_fd;
1757     g_autofree NetClientState **ncs = NULL;
1758     struct vhost_vdpa_iova_range iova_range;
1759     NetClientState *nc;
1760     int queue_pairs, r, i = 0, has_cvq = 0;
1761 
1762     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1763     opts = &netdev->u.vhost_vdpa;
1764     if (!opts->vhostdev && !opts->vhostfd) {
1765         error_setg(errp,
1766                    "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1767         return -1;
1768     }
1769 
1770     if (opts->vhostdev && opts->vhostfd) {
1771         error_setg(errp,
1772                    "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1773         return -1;
1774     }
1775 
1776     if (opts->vhostdev) {
1777         vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1778         if (vdpa_device_fd == -1) {
1779             return -errno;
1780         }
1781     } else {
1782         /* has_vhostfd */
1783         vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1784         if (vdpa_device_fd == -1) {
1785             error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1786             return -1;
1787         }
1788     }
1789 
1790     r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1791     if (unlikely(r < 0)) {
1792         goto err;
1793     }
1794 
1795     queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1796                                                  &has_cvq, errp);
1797     if (queue_pairs < 0) {
1798         qemu_close(vdpa_device_fd);
1799         return queue_pairs;
1800     }
1801 
1802     r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1803     if (unlikely(r < 0)) {
1804         error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1805                    strerror(-r));
1806         goto err;
1807     }
1808 
1809     if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1810         goto err;
1811     }
1812 
1813     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
1814 
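         /*
          * Create one net client per data queue pair; every pair after the
          * first reuses the VhostVDPAShared allocated by pair 0.
          */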
1815     for (i = 0; i < queue_pairs; i++) {
1816         VhostVDPAShared *shared = NULL;
1817 
1818         if (i) {
1819             shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
1820         }
1821         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1822                                      vdpa_device_fd, i, 2, true, opts->x_svq,
1823                                      iova_range, features, shared, errp);
1824         if (!ncs[i]) {
1825             goto err;
             }
1826     }
1827 
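         /*
          * The control virtqueue, if offered, gets its own non-datapath net
          * client backed by a single virtqueue.
          */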
1828     if (has_cvq) {
1829         VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
1830         VhostVDPAShared *shared = s0->vhost_vdpa.shared;
1831 
1832         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1833                                  vdpa_device_fd, i, 1, false,
1834                                  opts->x_svq, iova_range, features, shared,
1835                                  errp);
1836         if (!nc) {
1837             goto err;
             }
1838     }
1839 
1840     return 0;
1841 
1842 err:
1843     if (i) {
1844         for (i--; i >= 0; i--) {
1845             qemu_del_net_client(ncs[i]);
1846         }
1847     }
1848 
1849     qemu_close(vdpa_device_fd);
1850 
1851     return -1;
1852 }
1853