xref: /openbmc/qemu/net/vhost-vdpa.c (revision 33b78a30a3e8e1cf16ef423bf2e78caf3d560985)
1 /*
2  * vhost-vdpa.c
3  *
4  * Copyright(c) 2017-2018 Intel Corporation.
5  * Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include "clients.h"
14 #include "hw/virtio/virtio-net.h"
15 #include "net/vhost_net.h"
16 #include "net/vhost-vdpa.h"
17 #include "hw/virtio/vhost-vdpa.h"
18 #include "qemu/config-file.h"
19 #include "qemu/error-report.h"
20 #include "qemu/log.h"
21 #include "qemu/memalign.h"
22 #include "qemu/option.h"
23 #include "qapi/error.h"
24 #include <linux/vhost.h>
25 #include <sys/ioctl.h>
26 #include <err.h>
27 #include "standard-headers/linux/virtio_net.h"
28 #include "monitor/monitor.h"
29 #include "migration/misc.h"
30 #include "hw/virtio/vhost.h"
31 #include "trace.h"
32 
33 /* TODO: add multiqueue support here */
34 typedef struct VhostVDPAState {
35     NetClientState nc;
36     struct vhost_vdpa vhost_vdpa;
37     NotifierWithReturn migration_state;
38     VHostNetState *vhost_net;
39 
40     /* Control commands shadow buffers */
41     void *cvq_cmd_out_buffer;
42     virtio_net_ctrl_ack *status;
43 
44     /* The device always has SVQ enabled */
45     bool always_svq;
46 
47     /* The device can isolate CVQ in its own ASID */
48     bool cvq_isolated;
49 
50     bool started;
51 } VhostVDPAState;
52 
53 /*
54  * The array is sorted alphabetically in ascending order,
55  * with the exception of VHOST_INVALID_FEATURE_BIT,
56  * which should always be the last entry.
57  */
58 static const int vdpa_feature_bits[] = {
59     VIRTIO_F_ANY_LAYOUT,
60     VIRTIO_F_IN_ORDER,
61     VIRTIO_F_IOMMU_PLATFORM,
62     VIRTIO_F_NOTIFICATION_DATA,
63     VIRTIO_F_NOTIFY_ON_EMPTY,
64     VIRTIO_F_RING_PACKED,
65     VIRTIO_F_RING_RESET,
66     VIRTIO_F_VERSION_1,
67     VIRTIO_NET_F_CSUM,
68     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
69     VIRTIO_NET_F_CTRL_MAC_ADDR,
70     VIRTIO_NET_F_CTRL_RX,
71     VIRTIO_NET_F_CTRL_RX_EXTRA,
72     VIRTIO_NET_F_CTRL_VLAN,
73     VIRTIO_NET_F_CTRL_VQ,
74     VIRTIO_NET_F_GSO,
75     VIRTIO_NET_F_GUEST_CSUM,
76     VIRTIO_NET_F_GUEST_ECN,
77     VIRTIO_NET_F_GUEST_TSO4,
78     VIRTIO_NET_F_GUEST_TSO6,
79     VIRTIO_NET_F_GUEST_UFO,
80     VIRTIO_NET_F_GUEST_USO4,
81     VIRTIO_NET_F_GUEST_USO6,
82     VIRTIO_NET_F_HASH_REPORT,
83     VIRTIO_NET_F_HOST_ECN,
84     VIRTIO_NET_F_HOST_TSO4,
85     VIRTIO_NET_F_HOST_TSO6,
86     VIRTIO_NET_F_HOST_UFO,
87     VIRTIO_NET_F_HOST_USO,
88     VIRTIO_NET_F_MQ,
89     VIRTIO_NET_F_MRG_RXBUF,
90     VIRTIO_NET_F_MTU,
91     VIRTIO_NET_F_RSC_EXT,
92     VIRTIO_NET_F_RSS,
93     VIRTIO_NET_F_STATUS,
94     VIRTIO_RING_F_EVENT_IDX,
95     VIRTIO_RING_F_INDIRECT_DESC,
96 
97     /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
98     VHOST_INVALID_FEATURE_BIT
99 };
100 
101 /** Supported device-specific feature bits with SVQ */
102 static const uint64_t vdpa_svq_device_features =
103     BIT_ULL(VIRTIO_NET_F_CSUM) |
104     BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
105     BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
106     BIT_ULL(VIRTIO_NET_F_MTU) |
107     BIT_ULL(VIRTIO_NET_F_MAC) |
108     BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
109     BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
110     BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
111     BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
112     BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
113     BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
114     BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
115     BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
116     BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
117     BIT_ULL(VIRTIO_NET_F_STATUS) |
118     BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
119     BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
120     BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
121     BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
122     BIT_ULL(VIRTIO_NET_F_MQ) |
123     BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
124     BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
125     /* VHOST_F_LOG_ALL is exposed by SVQ */
126     BIT_ULL(VHOST_F_LOG_ALL) |
127     BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
128     BIT_ULL(VIRTIO_NET_F_RSS) |
129     BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
130     BIT_ULL(VIRTIO_NET_F_STANDBY) |
131     BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);
132 
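/*
 * Address space id the control virtqueue is moved to when it can be isolated
 * from the guest's data virtqueues, which stay in VHOST_VDPA_GUEST_PA_ASID.
 */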
133 #define VHOST_VDPA_NET_CVQ_ASID 1
134 
135 static struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc)
136 {
137     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
138     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
139     return s->vhost_net;
140 }
141 
142 static size_t vhost_vdpa_net_cvq_cmd_len(void)
143 {
144     /*
145      * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
146      * The in buffer is always 1 byte, so it always fits.
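     *
     * A sketch of that worst-case out buffer layout (following this file's
     * usage of struct virtio_net_ctrl_mac and MAC_TABLE_ENTRIES):
     *
     *   struct virtio_net_ctrl_hdr hdr;   // class / command
     *   struct virtio_net_ctrl_mac uni;   // unicast table header
     *   uint8_t uni_macs[][ETH_ALEN];     // unicast entries
     *   struct virtio_net_ctrl_mac mul;   // multicast table header
     *   uint8_t mul_macs[][ETH_ALEN];     // multicast entries
     *
     * where both tables together hold at most MAC_TABLE_ENTRIES addresses.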
147      */
148     return sizeof(struct virtio_net_ctrl_hdr) +
149            2 * sizeof(struct virtio_net_ctrl_mac) +
150            MAC_TABLE_ENTRIES * ETH_ALEN;
151 }
152 
153 static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
154 {
155     return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
156 }
157 
158 static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
159 {
160     uint64_t invalid_dev_features =
161         features & ~vdpa_svq_device_features &
162         /* Transport features are all accepted at this point */
163         ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
164                          VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
165 
166     if (invalid_dev_features) {
167         error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
168                    invalid_dev_features);
169         return false;
170     }
171 
172     return vhost_svq_valid_features(features, errp);
173 }
174 
175 static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
176 {
177     uint32_t device_id;
178     int ret;
179     struct vhost_dev *hdev;
180 
181     hdev = (struct vhost_dev *)&net->dev;
182     ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
183     if (ret == 0 && device_id != VIRTIO_ID_NET) {
184         return -ENOTSUP;
185     }
186     return ret;
187 }
188 
189 static int vhost_vdpa_add(NetClientState *ncs, void *be,
190                           int queue_pair_index, int nvqs)
191 {
192     VhostNetOptions options;
193     struct vhost_net *net = NULL;
194     VhostVDPAState *s;
195     int ret;
196 
197     options.backend_type = VHOST_BACKEND_TYPE_VDPA;
198     assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
199     s = DO_UPCAST(VhostVDPAState, nc, ncs);
200     options.net_backend = ncs;
201     options.opaque      = be;
202     options.busyloop_timeout = 0;
203     options.nvqs = nvqs;
204     options.feature_bits = vdpa_feature_bits;
205     options.get_acked_features = NULL;
206     options.save_acked_features = NULL;
207     options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;
208 
209     net = vhost_net_init(&options);
210     if (!net) {
211         error_report("failed to init vhost_net for queue");
212         goto err_init;
213     }
214     s->vhost_net = net;
215     ret = vhost_vdpa_net_check_device_id(net);
216     if (ret) {
217         goto err_check;
218     }
219     return 0;
220 err_check:
221     vhost_net_cleanup(net);
222     g_free(net);
223 err_init:
224     return -1;
225 }
226 
227 static void vhost_vdpa_cleanup(NetClientState *nc)
228 {
229     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
230 
231     munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
232     munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
233     if (s->vhost_net) {
234         vhost_net_cleanup(s->vhost_net);
235         g_free(s->vhost_net);
236         s->vhost_net = NULL;
237     }
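    /*
     * The shared state (device fd, IOVA tree) is owned by the first queue
     * pair; the other queue pairs must not tear it down.
     */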
238     if (s->vhost_vdpa.index != 0) {
239         return;
240     }
241     qemu_close(s->vhost_vdpa.shared->device_fd);
242     g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete);
243     g_free(s->vhost_vdpa.shared);
244 }
245 
246 /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend  */
247 static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
248 {
249     return true;
250 }
251 
252 static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
253 {
254     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
255 
256     return true;
257 }
258 
259 static bool vhost_vdpa_has_ufo(NetClientState *nc)
260 {
261     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
262     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
263     uint64_t features = 0;
264     features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
265     features = vhost_net_get_features(s->vhost_net, features);
266     return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
267 
268 }
269 
270 /*
271  * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
272  * reasonable to assume that h/w is LE by default, because LE is what
273  * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
274  * LE". Otherwise, on a BE machine, higher-level code would mistakely think
275  * the h/w is BE and can't support VDPA for a virtio 1.0 client.
276  */
277 static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
278 {
279     return 0;
280 }
281 
282 static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
283                                        Error **errp)
284 {
285     const char *driver = object_class_get_name(oc);
286 
287     if (!g_str_has_prefix(driver, "virtio-net-")) {
288         error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
289         return false;
290     }
291 
292     return true;
293 }
294 
295 /** Dummy receive in case QEMU falls back to userland tap networking */
296 static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
297                                   size_t size)
298 {
299     return size;
300 }
301 
302 
303 /** From any vdpa net client, get the netclient of the i-th queue pair */
304 static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
305 {
306     NICState *nic = qemu_get_nic(s->nc.peer);
307     NetClientState *nc_i = qemu_get_peer(nic->ncs, i);
308 
309     return DO_UPCAST(VhostVDPAState, nc, nc_i);
310 }
311 
312 static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
313 {
314     return vhost_vdpa_net_get_nc_vdpa(s, 0);
315 }
316 
317 static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
318 {
319     struct vhost_vdpa *v = &s->vhost_vdpa;
320     VirtIONet *n;
321     VirtIODevice *vdev;
322     int data_queue_pairs, cvq, r;
323 
324     /* We are only called on the first data vq and only if x-svq is not set */
325     if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
326         return;
327     }
328 
329     vdev = v->dev->vdev;
330     n = VIRTIO_NET(vdev);
331     if (!n->vhost_started) {
332         return;
333     }
334 
335     data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
336     cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
337                                   n->max_ncs - n->max_queue_pairs : 0;
338     v->shared->svq_switching = enable ?
339         SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
340     /*
341      * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
342      * in the future and resume the device if read-only operations between
343      * suspend and reset go wrong.
344      */
345     vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
346 
347     /* Start will check the migration state to decide whether to enable SVQ */
348     r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
349     if (unlikely(r < 0)) {
350         error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
351     }
352     v->shared->svq_switching = SVQ_TSTATE_DONE;
353 }
354 
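/*
 * Turn SVQ on when a live migration is set up, so guest memory writes can be
 * tracked through the shadow virtqueues, and turn it back off if the
 * migration fails.
 */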
355 static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
356                                              MigrationEvent *e, Error **errp)
357 {
358     VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);
359 
360     if (e->type == MIG_EVENT_PRECOPY_SETUP) {
361         vhost_vdpa_net_log_global_enable(s, true);
362     } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
363         vhost_vdpa_net_log_global_enable(s, false);
364     }
365     return 0;
366 }
367 
368 static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
369 {
370     migration_add_notifier(&s->migration_state,
371                            vdpa_net_migration_state_notifier);
372 }
373 
374 static int vhost_vdpa_net_data_start(NetClientState *nc)
375 {
376     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
377     struct vhost_vdpa *v = &s->vhost_vdpa;
378 
379     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
380 
381     if (s->always_svq || migration_is_running()) {
382         v->shadow_vqs_enabled = true;
383     } else {
384         v->shadow_vqs_enabled = false;
385     }
386 
387     if (v->index == 0) {
388         v->shared->shadow_data = v->shadow_vqs_enabled;
389         vhost_vdpa_net_data_start_first(s);
390         return 0;
391     }
392 
393     return 0;
394 }
395 
396 static int vhost_vdpa_net_data_load(NetClientState *nc)
397 {
398     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
399     struct vhost_vdpa *v = &s->vhost_vdpa;
400     bool has_cvq = v->dev->vq_index_end % 2;
401 
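    /*
     * When the device has a CVQ, the data vrings are enabled later, from
     * vhost_vdpa_net_cvq_load(), once the device state has been restored
     * through the control virtqueue.
     */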
402     if (has_cvq) {
403         return 0;
404     }
405 
406     for (int i = 0; i < v->dev->nvqs; ++i) {
407         int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
408         if (ret < 0) {
409             return ret;
410         }
411     }
412     return 0;
413 }
414 
415 static void vhost_vdpa_net_client_stop(NetClientState *nc)
416 {
417     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
418 
419     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
420 
421     if (s->vhost_vdpa.index == 0) {
422         migration_remove_notifier(&s->migration_state);
423     }
424 }
425 
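/*
 * NetClient callbacks for the data queue pairs; the control virtqueue uses
 * net_vhost_vdpa_cvq_info below instead.
 */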
426 static NetClientInfo net_vhost_vdpa_info = {
427         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
428         .size = sizeof(VhostVDPAState),
429         .receive = vhost_vdpa_receive,
430         .start = vhost_vdpa_net_data_start,
431         .load = vhost_vdpa_net_data_load,
432         .stop = vhost_vdpa_net_client_stop,
433         .cleanup = vhost_vdpa_cleanup,
434         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
435         .has_ufo = vhost_vdpa_has_ufo,
436         .set_vnet_le = vhost_vdpa_set_vnet_le,
437         .check_peer_type = vhost_vdpa_check_peer_type,
438         .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
439         .get_vhost_net = vhost_vdpa_get_vhost_net,
440 };
441 
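/**
 * Get the virtqueue group a virtqueue belongs to.
 *
 * @device_fd  The vdpa device fd
 * @vq_index   Index of the virtqueue
 * @errp       Error pointer in case of failure
 *
 * Returns the group number, or a negative errno on failure.
 */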
442 static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
443                                           Error **errp)
444 {
445     struct vhost_vring_state state = {
446         .index = vq_index,
447     };
448     int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
449 
450     if (unlikely(r < 0)) {
451         r = -errno;
452         error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
453         return r;
454     }
455 
456     return state.num;
457 }
458 
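/** Assign the virtqueue group @vq_group to the address space @asid_num */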
459 static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
460                                            unsigned vq_group,
461                                            unsigned asid_num)
462 {
463     struct vhost_vring_state asid = {
464         .index = vq_group,
465         .num = asid_num,
466     };
467     int r;
468 
469     trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);
470 
471     r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
472     if (unlikely(r < 0)) {
473         error_report("Can't set vq group %u asid %u, errno=%d (%s)",
474                      asid.index, asid.num, errno, g_strerror(errno));
475     }
476     return r;
477 }
478 
479 static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
480 {
481     VhostIOVATree *tree = v->shared->iova_tree;
482     DMAMap needle = {
483         /*
484          * No need to specify size or to look for more translations since
485          * this contiguous chunk was allocated by us.
486          */
487         .translated_addr = (hwaddr)(uintptr_t)addr,
488     };
489     const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
490     int r;
491 
492     if (unlikely(!map)) {
493         error_report("Cannot locate expected map");
494         return;
495     }
496 
497     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
498                              map->size + 1);
499     if (unlikely(r != 0)) {
500         error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
501     }
502 
503     vhost_iova_tree_remove(tree, *map);
504 }
505 
506 /** Map CVQ buffer. */
507 static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
508                                   bool write)
509 {
510     DMAMap map = {};
511     hwaddr taddr = (hwaddr)(uintptr_t)buf;
512     int r;
513 
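    /* DMAMap ranges are inclusive of their last byte, hence the size - 1 */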
514     map.size = size - 1;
515     map.perm = write ? IOMMU_RW : IOMMU_RO;
516     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
517     if (unlikely(r != IOVA_OK)) {
518         error_report("Cannot map injected element");
519 
520         if (map.translated_addr == taddr) {
521             error_report("Insertion to IOVA->HVA tree failed");
522             /* Remove the mapping from the IOVA-only tree */
523             goto dma_map_err;
524         }
525         return r;
526     }
527 
528     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
529                            vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
530     if (unlikely(r < 0)) {
531         goto dma_map_err;
532     }
533 
534     return 0;
535 
536 dma_map_err:
537     vhost_iova_tree_remove(v->shared->iova_tree, map);
538     return r;
539 }
540 
541 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
542 {
543     VhostVDPAState *s, *s0;
544     struct vhost_vdpa *v;
545     int64_t cvq_group;
546     int r;
547     Error *err = NULL;
548 
549     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
550 
551     s = DO_UPCAST(VhostVDPAState, nc, nc);
552     v = &s->vhost_vdpa;
553 
554     s0 = vhost_vdpa_net_first_nc_vdpa(s);
555     v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
556     s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
557 
558     if (v->shared->shadow_data) {
559         /* SVQ is already configured for all virtqueues */
560         goto out;
561     }
562 
563     /*
564      * If we return early in these cases, SVQ will not be enabled. Migration
565      * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
566      */
567     if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
568         return 0;
569     }
570 
571     if (!s->cvq_isolated) {
572         return 0;
573     }
574 
575     cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
576                                            v->dev->vq_index_end - 1,
577                                            &err);
578     if (unlikely(cvq_group < 0)) {
579         error_report_err(err);
580         return cvq_group;
581     }
582 
583     r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
584     if (unlikely(r < 0)) {
585         return r;
586     }
587 
588     v->shadow_vqs_enabled = true;
589     s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
590 
591 out:
592     if (!s->vhost_vdpa.shadow_vqs_enabled) {
593         return 0;
594     }
595 
596     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
597                                vhost_vdpa_net_cvq_cmd_page_len(), false);
598     if (unlikely(r < 0)) {
599         return r;
600     }
601 
602     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
603                                vhost_vdpa_net_cvq_cmd_page_len(), true);
604     if (unlikely(r < 0)) {
605         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
606     }
607 
608     return r;
609 }
610 
611 static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
612 {
613     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
614 
615     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
616 
617     if (s->vhost_vdpa.shadow_vqs_enabled) {
618         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
619         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
620     }
621 
622     vhost_vdpa_net_client_stop(nc);
623 }
624 
625 static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
626                                     const struct iovec *out_sg, size_t out_num,
627                                     const struct iovec *in_sg, size_t in_num)
628 {
629     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
630     int r;
631 
632     r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
633     if (unlikely(r != 0)) {
634         if (unlikely(r == -ENOSPC)) {
635             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
636                           __func__);
637         }
638     }
639 
640     return r;
641 }
642 
643 /*
644  * Convenience wrapper to poll SVQ for multiple control commands.
645  *
646  * Caller should hold the BQL when invoking this function, and should collect
647  * the answer before the BQL is released, since SVQ then polls by itself.
648  */
649 static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
650 {
651     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
652     return vhost_svq_poll(svq, cmds_in_flight);
653 }
654 
655 static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
656                                              struct iovec *out_cursor,
657                                              struct iovec *in_cursor)
658 {
659     /* reset the cursor of the output buffer for the device */
660     out_cursor->iov_base = s->cvq_cmd_out_buffer;
661     out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
662 
663     /* reset the cursor of the in buffer for the device */
664     in_cursor->iov_base = s->status;
665     in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
666 }
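
/*
 * A sketch of how the cursors are used by the vhost_vdpa_net_load_*()
 * helpers below (see vhost_vdpa_net_cvq_load()):
 *
 *     struct iovec out_cursor, in_cursor;
 *
 *     vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
 *     r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
 *     ...
 *     r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
 *
 * Each queued command consumes its size from the out cursor and one status
 * byte from the in cursor; the flush polls for all still-pending acks.
 */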
667 
668 /*
669  * Poll SVQ for multiple pending control commands and check the device's ack.
670  *
671  * Caller should hold the BQL when invoking this function.
672  *
673  * @s: The VhostVDPAState
674  * @len: The length of the pending status shadow buffer
675  */
676 static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
677 {
678     /* device uses a one-byte length ack for each control command */
679     ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
680     if (unlikely(dev_written != len)) {
681         return -EIO;
682     }
683 
684     /* check the device's ack */
685     for (int i = 0; i < len; ++i) {
686         if (s->status[i] != VIRTIO_NET_OK) {
687             return -EIO;
688         }
689     }
690     return 0;
691 }
692 
693 static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
694                                        struct iovec *out_cursor,
695                                        struct iovec *in_cursor, uint8_t class,
696                                        uint8_t cmd, const struct iovec *data_sg,
697                                        size_t data_num)
698 {
699     const struct virtio_net_ctrl_hdr ctrl = {
700         .class = class,
701         .cmd = cmd,
702     };
703     size_t data_size = iov_size(data_sg, data_num), cmd_size;
704     struct iovec out, in;
705     ssize_t r;
706     unsigned dummy_cursor_iov_cnt;
707     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
708 
709     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
710     cmd_size = sizeof(ctrl) + data_size;
711     trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
712     if (vhost_svq_available_slots(svq) < 2 ||
713         iov_size(out_cursor, 1) < cmd_size) {
714         /*
715          * It is time to flush all pending control commands if SVQ is full
716          * or control commands shadow buffers are full.
717          *
718          * We can poll here since we've had BQL from the time
719          * we sent the descriptor.
720          */
721         r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
722                                      (void *)s->status);
723         if (unlikely(r < 0)) {
724             return r;
725         }
726 
727         vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
728     }
729 
730     /* pack the CVQ command header */
731     iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
732     /* pack the CVQ command's command-specific data */
733     iov_to_buf(data_sg, data_num, 0,
734                out_cursor->iov_base + sizeof(ctrl), data_size);
735 
736     /* extract the required buffer from the cursor for output */
737     iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
738     /* extract the required buffer from the cursor for input */
739     iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
740 
741     r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
742     if (unlikely(r < 0)) {
743         trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
744         return r;
745     }
746 
747     /* iterate the cursors */
748     dummy_cursor_iov_cnt = 1;
749     iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
750     dummy_cursor_iov_cnt = 1;
751     iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
752 
753     return 0;
754 }
755 
756 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
757                                    struct iovec *out_cursor,
758                                    struct iovec *in_cursor)
759 {
760     if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
761         const struct iovec data = {
762             .iov_base = (void *)n->mac,
763             .iov_len = sizeof(n->mac),
764         };
765         ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
766                                             VIRTIO_NET_CTRL_MAC,
767                                             VIRTIO_NET_CTRL_MAC_ADDR_SET,
768                                             &data, 1);
769         if (unlikely(r < 0)) {
770             return r;
771         }
772     }
773 
774     /*
775      * According to VirtIO standard, "The device MUST have an
776      * empty MAC filtering table on reset.".
777      *
778      * Therefore, there is no need to send this CVQ command if the
779      * driver also sets an empty MAC filter table, which aligns with
780      * the device's defaults.
781      *
782      * Note that the device's defaults can mismatch the driver's
783      * configuration only at live migration.
784      */
785     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
786         n->mac_table.in_use == 0) {
787         return 0;
788     }
789 
790     uint32_t uni_entries = n->mac_table.first_multi,
791              uni_macs_size = uni_entries * ETH_ALEN,
792              mul_entries = n->mac_table.in_use - uni_entries,
793              mul_macs_size = mul_entries * ETH_ALEN;
794     struct virtio_net_ctrl_mac uni = {
795         .entries = cpu_to_le32(uni_entries),
796     };
797     struct virtio_net_ctrl_mac mul = {
798         .entries = cpu_to_le32(mul_entries),
799     };
800     const struct iovec data[] = {
801         {
802             .iov_base = &uni,
803             .iov_len = sizeof(uni),
804         }, {
805             .iov_base = n->mac_table.macs,
806             .iov_len = uni_macs_size,
807         }, {
808             .iov_base = &mul,
809             .iov_len = sizeof(mul),
810         }, {
811             .iov_base = &n->mac_table.macs[uni_macs_size],
812             .iov_len = mul_macs_size,
813         },
814     };
815     ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
816                                         VIRTIO_NET_CTRL_MAC,
817                                         VIRTIO_NET_CTRL_MAC_TABLE_SET,
818                                         data, ARRAY_SIZE(data));
819     if (unlikely(r < 0)) {
820         return r;
821     }
822 
823     return 0;
824 }
825 
826 static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
827                                    struct iovec *out_cursor,
828                                    struct iovec *in_cursor, bool do_rss)
829 {
830     struct virtio_net_rss_config cfg = {};
831     ssize_t r;
832     g_autofree uint16_t *table = NULL;
833 
834     /*
835      * According to VirtIO standard, "Initially the device has all hash
836      * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
837      *
838      * Therefore, there is no need to send this CVQ command if the
839      * driver disables all hash types, which aligns with
840      * the device's defaults.
841      *
842      * Note that the device's defaults can mismatch the driver's
843      * configuration only at live migration.
844      */
845     if (!n->rss_data.enabled ||
846         n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
847         return 0;
848     }
849 
850     table = g_malloc_n(n->rss_data.indirections_len,
851                        sizeof(n->rss_data.indirections_table[0]));
852     cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);
853 
854     if (do_rss) {
855         /*
856          * According to VirtIO standard, "Number of entries in indirection_table
857          * is (indirection_table_mask + 1)".
858          */
859         cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
860                                                  1);
861         cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
862         for (int i = 0; i < n->rss_data.indirections_len; ++i) {
863             table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
864         }
865         cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
866     } else {
867         /*
868          * According to VirtIO standard, "Field reserved MUST contain zeroes.
869          * It is defined to make the structure to match the layout of
870          * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
871          *
872          * Therefore, we need to zero the fields in
873      * struct virtio_net_rss_config, which correspond to the
874          * `reserved` field in struct virtio_net_hash_config.
875          *
876          * Note that all other fields are zeroed at their definitions,
877          * except for the `indirection_table` field, where the actual data
878          * is stored in the `table` variable to ensure compatibility
879      * with the RSS case. Therefore, we need to zero the `table` variable here.
880          */
881         table[0] = 0;
882     }
883 
884     /*
885      * Considering that virtio_net_handle_rss() currently does not restore
886      * the hash key length parsed from the CVQ command sent from the guest
887      * into n->rss_data and uses the maximum key length in other code,
888      * we also employ the maximum key length here.
889      */
890     cfg.hash_key_length = sizeof(n->rss_data.key);
891 
892     const struct iovec data[] = {
893         {
894             .iov_base = &cfg,
895             .iov_len = offsetof(struct virtio_net_rss_config,
896                                 indirection_table),
897         }, {
898             .iov_base = table,
899             .iov_len = n->rss_data.indirections_len *
900                        sizeof(n->rss_data.indirections_table[0]),
901         }, {
902             .iov_base = &cfg.max_tx_vq,
903             .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
904                        offsetof(struct virtio_net_rss_config, max_tx_vq),
905         }, {
906             .iov_base = (void *)n->rss_data.key,
907             .iov_len = sizeof(n->rss_data.key),
908         }
909     };
910 
911     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
912                                 VIRTIO_NET_CTRL_MQ,
913                                 do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
914                                 VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
915                                 data, ARRAY_SIZE(data));
916     if (unlikely(r < 0)) {
917         return r;
918     }
919 
920     return 0;
921 }
922 
923 static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
924                                   const VirtIONet *n,
925                                   struct iovec *out_cursor,
926                                   struct iovec *in_cursor)
927 {
928     struct virtio_net_ctrl_mq mq;
929     ssize_t r;
930 
931     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
932         return 0;
933     }
934 
935     trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);
936 
937     mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
938     const struct iovec data = {
939         .iov_base = &mq,
940         .iov_len = sizeof(mq),
941     };
942     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
943                                 VIRTIO_NET_CTRL_MQ,
944                                 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
945                                 &data, 1);
946     if (unlikely(r < 0)) {
947         return r;
948     }
949 
950     if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
951         /* load the receive-side scaling state */
952         r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
953         if (unlikely(r < 0)) {
954             return r;
955         }
956     } else if (virtio_vdev_has_feature(&n->parent_obj,
957                                        VIRTIO_NET_F_HASH_REPORT)) {
958         /* load the hash calculation state */
959         r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
960         if (unlikely(r < 0)) {
961             return r;
962         }
963     }
964 
965     return 0;
966 }
967 
968 static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
969                                         const VirtIONet *n,
970                                         struct iovec *out_cursor,
971                                         struct iovec *in_cursor)
972 {
973     uint64_t offloads;
974     ssize_t r;
975 
976     if (!virtio_vdev_has_feature(&n->parent_obj,
977                                  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
978         return 0;
979     }
980 
981     if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
982         /*
983          * According to VirtIO standard, "Upon feature negotiation
984          * corresponding offload gets enabled to preserve
985          * backward compatibility.".
986          *
987          * Therefore, there is no need to send this CVQ command if the
988          * driver also enables all supported offloads, which aligns with
989          * the device's defaults.
990          *
991          * Note that the device's defaults can mismatch the driver's
992          * configuration only at live migration.
993          */
994         return 0;
995     }
996 
997     offloads = cpu_to_le64(n->curr_guest_offloads);
998     const struct iovec data = {
999         .iov_base = &offloads,
1000         .iov_len = sizeof(offloads),
1001     };
1002     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
1003                                 VIRTIO_NET_CTRL_GUEST_OFFLOADS,
1004                                 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
1005                                 &data, 1);
1006     if (unlikely(r < 0)) {
1007         return r;
1008     }
1009 
1010     return 0;
1011 }
1012 
1013 static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
1014                                        struct iovec *out_cursor,
1015                                        struct iovec *in_cursor,
1016                                        uint8_t cmd,
1017                                        uint8_t on)
1018 {
1019     const struct iovec data = {
1020         .iov_base = &on,
1021         .iov_len = sizeof(on),
1022     };
1023     ssize_t r;
1024 
1025     r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
1026                                 VIRTIO_NET_CTRL_RX, cmd, &data, 1);
1027     if (unlikely(r < 0)) {
1028         return r;
1029     }
1030 
1031     return 0;
1032 }
1033 
1034 static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
1035                                   const VirtIONet *n,
1036                                   struct iovec *out_cursor,
1037                                   struct iovec *in_cursor)
1038 {
1039     ssize_t r;
1040 
1041     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
1042         return 0;
1043     }
1044 
1045     /*
1046      * According to virtio_net_reset(), device turns promiscuous mode
1047      * on by default.
1048      *
1049      * Additionally, according to VirtIO standard, "Since there are
1050      * no guarantees, it can use a hash filter or silently switch to
1051      * allmulti or promiscuous mode if it is given too many addresses.".
1052      * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
1053      * non-multicast MAC addresses, indicating that promiscuous mode
1054      * should be enabled.
1055      *
1056      * Therefore, QEMU should only send this CVQ command if the
1057      * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
1058      * which sets promiscuous mode on, different from the device's defaults.
1059      *
1060      * Note that the device's defaults can mismatch the driver's
1061      * configuration only at live migration.
1062      */
1063     if (!n->mac_table.uni_overflow && !n->promisc) {
1064         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1065                                         VIRTIO_NET_CTRL_RX_PROMISC, 0);
1066         if (unlikely(r < 0)) {
1067             return r;
1068         }
1069     }
1070 
1071     /*
1072      * According to virtio_net_reset(), device turns all-multicast mode
1073      * off by default.
1074      *
1075      * According to VirtIO standard, "Since there are no guarantees,
1076      * it can use a hash filter or silently switch to allmulti or
1077      * promiscuous mode if it is given too many addresses.". QEMU marks
1078      * `n->mac_table.multi_overflow` if guest sets too many
1079      * non-multicast MAC addresses.
1080      *
1081      * Therefore, QEMU should only send this CVQ command if the
1082      * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
1083      * which sets all-multicast mode on, different from the device's defaults.
1084      *
1085      * Note that the device's defaults can mismatch the driver's
1086      * configuration only at live migration.
1087      */
1088     if (n->mac_table.multi_overflow || n->allmulti) {
1089         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1090                                         VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
1091         if (unlikely(r < 0)) {
1092             return r;
1093         }
1094     }
1095 
1096     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
1097         return 0;
1098     }
1099 
1100     /*
1101      * According to virtio_net_reset(), device turns all-unicast mode
1102      * off by default.
1103      *
1104      * Therefore, QEMU should only send this CVQ command if the driver
1105      * sets all-unicast mode on, different from the device's defaults.
1106      *
1107      * Note that the device's defaults can mismatch the driver's
1108      * configuration only at live migration.
1109      */
1110     if (n->alluni) {
1111         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1112                                         VIRTIO_NET_CTRL_RX_ALLUNI, 1);
1113         if (r < 0) {
1114             return r;
1115         }
1116     }
1117 
1118     /*
1119      * According to virtio_net_reset(), device turns non-multicast mode
1120      * off by default.
1121      *
1122      * Therefore, QEMU should only send this CVQ command if the driver
1123      * sets non-multicast mode on, different from the device's defaults.
1124      *
1125      * Note that the device's defaults can mismatch the driver's
1126      * configuration only at live migration.
1127      */
1128     if (n->nomulti) {
1129         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1130                                         VIRTIO_NET_CTRL_RX_NOMULTI, 1);
1131         if (r < 0) {
1132             return r;
1133         }
1134     }
1135 
1136     /*
1137      * According to virtio_net_reset(), device turns non-unicast mode
1138      * off by default.
1139      *
1140      * Therefore, QEMU should only send this CVQ command if the driver
1141      * sets non-unicast mode on, different from the device's defaults.
1142      *
1143      * Note that the device's defaults can mismatch the driver's
1144      * configuration only at live migration.
1145      */
1146     if (n->nouni) {
1147         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1148                                         VIRTIO_NET_CTRL_RX_NOUNI, 1);
1149         if (r < 0) {
1150             return r;
1151         }
1152     }
1153 
1154     /*
1155      * According to virtio_net_reset(), device turns non-broadcast mode
1156      * off by default.
1157      *
1158      * Therefore, QEMU should only send this CVQ command if the driver
1159      * sets non-broadcast mode on, different from the device's defaults.
1160      *
1161      * Note that the device's defaults can mismatch the driver's
1162      * configuration only at live migration.
1163      */
1164     if (n->nobcast) {
1165         r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
1166                                         VIRTIO_NET_CTRL_RX_NOBCAST, 1);
1167         if (r < 0) {
1168             return r;
1169         }
1170     }
1171 
1172     return 0;
1173 }
1174 
1175 static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
1176                                            const VirtIONet *n,
1177                                            struct iovec *out_cursor,
1178                                            struct iovec *in_cursor,
1179                                            uint16_t vid)
1180 {
1181     const struct iovec data = {
1182         .iov_base = &vid,
1183         .iov_len = sizeof(vid),
1184     };
1185     ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
1186                                         VIRTIO_NET_CTRL_VLAN,
1187                                         VIRTIO_NET_CTRL_VLAN_ADD,
1188                                         &data, 1);
1189     if (unlikely(r < 0)) {
1190         return r;
1191     }
1192 
1193     return 0;
1194 }
1195 
1196 static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
1197                                     const VirtIONet *n,
1198                                     struct iovec *out_cursor,
1199                                     struct iovec *in_cursor)
1200 {
1201     int r;
1202 
1203     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
1204         return 0;
1205     }
1206 
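    /*
     * n->vlans is a bitmap of MAX_VLAN bits stored in 32-bit words; send one
     * VLAN_ADD command for each bit the driver has set.
     */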
1207     for (int i = 0; i < MAX_VLAN >> 5; i++) {
1208         for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
1209             if (n->vlans[i] & (1U << j)) {
1210                 r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
1211                                                     in_cursor, (i << 5) + j);
1212                 if (unlikely(r != 0)) {
1213                     return r;
1214                 }
1215             }
1216         }
1217     }
1218 
1219     return 0;
1220 }
1221 
1222 static int vhost_vdpa_net_cvq_load(NetClientState *nc)
1223 {
1224     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
1225     struct vhost_vdpa *v = &s->vhost_vdpa;
1226     const VirtIONet *n;
1227     int r;
1228     struct iovec out_cursor, in_cursor;
1229 
1230     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1231 
1232     r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
1233     if (unlikely(r < 0)) {
1234         return r;
1235     }
1236 
1237     if (v->shadow_vqs_enabled) {
1238         n = VIRTIO_NET(v->dev->vdev);
1239         vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
1240         r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
1241         if (unlikely(r < 0)) {
1242             return r;
1243         }
1244         r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
1245         if (unlikely(r)) {
1246             return r;
1247         }
1248         r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
1249         if (unlikely(r)) {
1250             return r;
1251         }
1252         r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
1253         if (unlikely(r)) {
1254             return r;
1255         }
1256         r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
1257         if (unlikely(r)) {
1258             return r;
1259         }
1260 
1261         /*
1262          * We need to poll and check all pending device's used buffers.
1263          *
1264          * We can poll here since we've had BQL from the time
1265          * we sent the descriptor.
1266          */
1267         r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
1268         if (unlikely(r)) {
1269             return r;
1270         }
1271     }
1272 
1273     for (int i = 0; i < v->dev->vq_index; ++i) {
1274         r = vhost_vdpa_set_vring_ready(v, i);
1275         if (unlikely(r < 0)) {
1276             return r;
1277         }
1278     }
1279 
1280     return 0;
1281 }
1282 
1283 static NetClientInfo net_vhost_vdpa_cvq_info = {
1284     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
1285     .size = sizeof(VhostVDPAState),
1286     .receive = vhost_vdpa_receive,
1287     .start = vhost_vdpa_net_cvq_start,
1288     .load = vhost_vdpa_net_cvq_load,
1289     .stop = vhost_vdpa_net_cvq_stop,
1290     .cleanup = vhost_vdpa_cleanup,
1291     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
1292     .has_ufo = vhost_vdpa_has_ufo,
1293     .check_peer_type = vhost_vdpa_check_peer_type,
1294     .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
1295     .get_vhost_net = vhost_vdpa_get_vhost_net,
1296 };
1297 
1298 /*
1299  * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
1300  * vdpa device.
1301  *
1302  * Considering that QEMU cannot send the entire filter table to the
1303  * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
1304  * command to enable promiscuous mode to receive all packets,
1305  * according to VirtIO standard, "Since there are no guarantees,
1306  * it can use a hash filter or silently switch to allmulti or
1307  * promiscuous mode if it is given too many addresses.".
1308  *
1309  * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
1310  * marks `n->mac_table.x_overflow` accordingly, it should have
1311  * the same effect on the device model to receive
1312  * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
1313  * The same applies to multicast MAC addresses.
1314  *
1315  * Therefore, QEMU can provide the device model with a fake
1316  * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
1317  * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1318  * MAC addresses. This ensures that the device model marks
1319  * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1320  * allowing all packets to be received, which aligns with the
1321  * state of the vdpa device.
1322  */
1323 static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
1324                                                        VirtQueueElement *elem,
1325                                                        struct iovec *out,
1326                                                        const struct iovec *in)
1327 {
1328     struct virtio_net_ctrl_mac mac_data, *mac_ptr;
1329     struct virtio_net_ctrl_hdr *hdr_ptr;
1330     uint32_t cursor;
1331     ssize_t r;
1332     uint8_t on = 1;
1333 
1334     /* parse the non-multicast MAC address entries from CVQ command */
1335     cursor = sizeof(*hdr_ptr);
1336     r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1337                    &mac_data, sizeof(mac_data));
1338     if (unlikely(r != sizeof(mac_data))) {
1339         /*
1340          * If the CVQ command is invalid, we should simulate the vdpa device
1341          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1342          */
1343         *s->status = VIRTIO_NET_ERR;
1344         return sizeof(*s->status);
1345     }
1346     cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1347 
1348     /* parse the multicast MAC address entries from CVQ command */
1349     r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1350                    &mac_data, sizeof(mac_data));
1351     if (r != sizeof(mac_data)) {
1352         /*
1353          * If the CVQ command is invalid, we should simulate the vdpa device
1354          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1355          */
1356         *s->status = VIRTIO_NET_ERR;
1357         return sizeof(*s->status);
1358     }
1359     cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1360 
1361     /* validate the CVQ command */
1362     if (iov_size(elem->out_sg, elem->out_num) != cursor) {
1363         /*
1364          * If the CVQ command is invalid, we should simulate the vdpa device
1365          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1366          */
1367         *s->status = VIRTIO_NET_ERR;
1368         return sizeof(*s->status);
1369     }
1370 
1371     /*
1372      * According to VirtIO standard, "Since there are no guarantees,
1373      * it can use a hash filter or silently switch to allmulti or
1374      * promiscuous mode if it is given too many addresses.".
1375      *
1376      * Therefore, considering that QEMU is unable to send the entire
1377      * filter table to the vdpa device, it should send the
1378      * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
1379      */
1380     hdr_ptr = out->iov_base;
1381     out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
1382 
1383     hdr_ptr->class = VIRTIO_NET_CTRL_RX;
1384     hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
1385     iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
1386     r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
1387     if (unlikely(r < 0)) {
1388         return r;
1389     }
1390 
1391     /*
1392      * We can poll here since we've had BQL from the time
1393      * we sent the descriptor.
1394      */
1395     r = vhost_vdpa_net_svq_poll(s, 1);
1396     if (unlikely(r < sizeof(*s->status))) {
1397         return r;
1398     }
1399     if (*s->status != VIRTIO_NET_OK) {
1400         return sizeof(*s->status);
1401     }
1402 
1403     /*
1404      * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
1405      * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
1406      * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
1407      * multicast MAC addresses.
1408      *
1409      * By doing so, the device model can mark `n->mac_table.uni_overflow`
1410      * and `n->mac_table.multi_overflow`, enabling all packets to be
1411      * received, which aligns with the state of the vdpa device.
1412      */
1413     cursor = 0;
1414     uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
1415              fake_mul_entries = MAC_TABLE_ENTRIES + 1,
1416              fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
1417                              sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
1418                              sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
1419 
1420     assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
1421     out->iov_len = fake_cvq_size;
1422 
1423     /* pack the header for fake CVQ command */
1424     hdr_ptr = out->iov_base + cursor;
1425     hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
1426     hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1427     cursor += sizeof(*hdr_ptr);
1428 
1429     /*
1430      * Pack the non-multicast MAC addresses part for fake CVQ command.
1431      *
1432      * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1433      * addresses provided in the CVQ command. Therefore, only the entries
1434      * field needs to be prepared in the CVQ command.
1435      */
1436     mac_ptr = out->iov_base + cursor;
1437     mac_ptr->entries = cpu_to_le32(fake_uni_entries);
1438     cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
1439 
1440     /*
1441      * Pack the multicast MAC addresses part for fake CVQ command.
1442      *
1443      * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1444      * addresses provided in the CVQ command. Therefore, only the entries
1445      * field needs to be prepared in the CVQ command.
1446      */
1447     mac_ptr = out->iov_base + cursor;
1448     mac_ptr->entries = cpu_to_le32(fake_mul_entries);
1449 
1450     /*
1451      * Simulate QEMU polling a used buffer from the vdpa device
1452      * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1453      */
1454     return sizeof(*s->status);
1455 }
1456 
1457 /**
1458  * Validate and copy control virtqueue commands.
1459  *
1460  * Following QEMU guidelines, we offer a copy of the buffers to the device to
1461  * prevent TOCTOU bugs.
1462  */
1463 static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1464                                             VirtQueueElement *elem,
1465                                             void *opaque)
1466 {
1467     VhostVDPAState *s = opaque;
1468     size_t in_len;
1469     const struct virtio_net_ctrl_hdr *ctrl;
1470     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1471     /* Out buffer sent to both the vdpa device and the device model */
1472     struct iovec out = {
1473         .iov_base = s->cvq_cmd_out_buffer,
1474     };
1475     /* in buffer used for device model */
1476     const struct iovec model_in = {
1477         .iov_base = &status,
1478         .iov_len = sizeof(status),
1479     };
1480     /* in buffer used for vdpa device */
1481     const struct iovec vdpa_in = {
1482         .iov_base = s->status,
1483         .iov_len = sizeof(*s->status),
1484     };
1485     ssize_t dev_written = -EINVAL;
1486 
1487     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1488                              s->cvq_cmd_out_buffer,
1489                              vhost_vdpa_net_cvq_cmd_page_len());
1490 
1491     ctrl = s->cvq_cmd_out_buffer;
1492     if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1493         /*
1494          * Guest announce capability is emulated by QEMU, so don't forward to
1495          * the device.
1496          */
1497         dev_written = sizeof(status);
1498         *s->status = VIRTIO_NET_OK;
1499     } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1500                         ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1501                         iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1502         /*
1503          * Due to the size limitation of the out buffer sent to the vdpa device,
1504          * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1505          * MAC addresses set by the driver for the filter table can cause
1506          * truncation of the CVQ command in QEMU. As a result, the vdpa device
1507          * rejects the flawed CVQ command.
1508          *
1509          * Therefore, QEMU must handle this situation instead of sending
1510          * the CVQ command directly.
1511          */
1512         dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1513                                                             &out, &vdpa_in);
1514         if (unlikely(dev_written < 0)) {
1515             goto out;
1516         }
1517     } else {
1518         ssize_t r;
1519         r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
1520         if (unlikely(r < 0)) {
1521             dev_written = r;
1522             goto out;
1523         }
1524 
        /*
         * We can poll here since we've held the BQL from the time
         * we sent the descriptor.
         */
1529         dev_written = vhost_vdpa_net_svq_poll(s, 1);
1530     }
1531 
1532     if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
1534         goto out;
1535     }
1536 
1537     if (*s->status != VIRTIO_NET_OK) {
1538         goto out;
1539     }
1540 
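    /*
     * The device accepted the command: replay it into QEMU's virtio-net
     * model so the model's state (e.g. its MAC filter table) stays in
     * sync with the device.
     */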
1541     status = VIRTIO_NET_ERR;
1542     virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
1543     if (status != VIRTIO_NET_OK) {
1544         error_report("Bad CVQ processing in model");
1545     }
1546 
1547 out:
1548     in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1549                           sizeof(status));
1550     if (unlikely(in_len < sizeof(status))) {
1551         error_report("Bad device CVQ written length");
1552     }
1553     vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
1561     if (dev_written >= 0) {
1562         g_free(elem);
1563     }
1564     return dev_written < 0 ? dev_written : 0;
1565 }
1566 
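/*
 * SVQ ops for the control virtqueue: every CVQ buffer the guest makes
 * available is intercepted by vhost_vdpa_net_handle_ctrl_avail().
 */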
1567 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1568     .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1569 };
1570 
/**
 * Probe if CVQ is isolated in its own virtqueue group
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device
 * @cvq_index         Index of the control virtqueue
 * @errp              Error pointer
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
1580 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1581                                           int cvq_index, Error **errp)
1582 {
1583     ERRP_GUARD();
1584     uint64_t backend_features;
1585     int64_t cvq_group;
1586     uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1587                      VIRTIO_CONFIG_S_DRIVER;
1588     int r;
1589 
1590     r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1591     if (unlikely(r < 0)) {
1592         error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1593         return r;
1594     }
1595 
1596     if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1597         return 0;
1598     }
1599 
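    /*
     * Walk the device through the start of the virtio initialization
     * sequence (ACKNOWLEDGE | DRIVER, then features, then FEATURES_OK),
     * since the vring-to-group mapping may depend on the negotiated
     * features.
     */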
1600     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1601     if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
1603         goto out;
1604     }
1605 
1606     r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1607     if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
1609         goto out;
1610     }
1611 
1612     status |= VIRTIO_CONFIG_S_FEATURES_OK;
1613     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1614     if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
1616         goto out;
1617     }
1618 
1619     cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1620     if (unlikely(cvq_group < 0)) {
1621         if (cvq_group != -ENOTSUP) {
1622             r = cvq_group;
1623             goto out;
1624         }
1625 
        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
1631         error_free(*errp);
1632         *errp = NULL;
1633         r = 0;
1634         goto out;
1635     }
1636 
1637     for (int i = 0; i < cvq_index; ++i) {
1638         int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1639         if (unlikely(group < 0)) {
1640             r = group;
1641             goto out;
1642         }
1643 
1644         if (group == (int64_t)cvq_group) {
1645             r = 0;
1646             goto out;
1647         }
1648     }
1649 
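    /* CVQ shares its group with no data vq, so it can get its own ASID. */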
1650     r = 1;
1651 
1652 out:
1653     status = 0;
1654     ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1655     return r;
1656 }
1657 
1658 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1659                                        const char *device,
1660                                        const char *name,
1661                                        int vdpa_device_fd,
1662                                        int queue_pair_index,
1663                                        int nvqs,
1664                                        bool is_datapath,
1665                                        bool svq,
1666                                        struct vhost_vdpa_iova_range iova_range,
1667                                        uint64_t features,
1668                                        VhostVDPAShared *shared,
1669                                        Error **errp)
1670 {
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);
1676 
1677     if (is_datapath) {
1678         nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1679                                  name);
1680     } else {
1681         cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1682                                                       queue_pair_index * 2,
1683                                                       errp);
1684         if (unlikely(cvq_isolated < 0)) {
1685             return NULL;
1686         }
1687 
1688         nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1689                                          device, name);
1690     }
1691     qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1692     s = DO_UPCAST(VhostVDPAState, nc, nc);
1693 
1694     s->vhost_vdpa.index = queue_pair_index;
1695     s->always_svq = svq;
1696     s->migration_state.notify = NULL;
1697     s->vhost_vdpa.shadow_vqs_enabled = svq;
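    /*
     * Queue pair 0 allocates the state shared by every queue of the device
     * (device fd, IOVA range and tree); the other queue pairs and the CVQ
     * client reuse it via the `shared` argument below.
     */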
1698     if (queue_pair_index == 0) {
1699         vhost_vdpa_net_valid_svq_features(features,
1700                                           &s->vhost_vdpa.migration_blocker);
1701         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
1702         s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
1703         s->vhost_vdpa.shared->iova_range = iova_range;
1704         s->vhost_vdpa.shared->shadow_data = svq;
1705         s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first,
1706                                                               iova_range.last);
1707     } else if (!is_datapath) {
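        /*
         * Back the CVQ shadow buffers with anonymous, page-sized mappings
         * rather than heap memory: the buffers are DMA-mapped into the
         * device, which works at page granularity.
         */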
1708         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1709                                      PROT_READ | PROT_WRITE,
1710                                      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1711         s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1712                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1713                          -1, 0);
1714 
1715         s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1716         s->vhost_vdpa.shadow_vq_ops_opaque = s;
1717         s->cvq_isolated = cvq_isolated;
1718     }
1719     if (queue_pair_index != 0) {
1720         s->vhost_vdpa.shared = shared;
1721     }
1722 
1723     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1724     if (ret) {
1725         qemu_del_net_client(nc);
1726         return NULL;
1727     }
1728 
1729     return nc;
1730 }
1731 
1732 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1733 {
1734     int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1735     if (unlikely(ret < 0)) {
1736         error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
1738     }
1739     return ret;
1740 }
1741 
1742 static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1743                                           int *has_cvq, Error **errp)
1744 {
1745     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1746     g_autofree struct vhost_vdpa_config *config = NULL;
1747     __virtio16 *max_queue_pairs;
1748     int ret;
1749 
1750     if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
1751         *has_cvq = 1;
1752     } else {
1753         *has_cvq = 0;
1754     }
1755 
1756     if (features & (1 << VIRTIO_NET_F_MQ)) {
1757         config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1758         config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1759         config->len = sizeof(*max_queue_pairs);
1760 
1761         ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return ret;
        }
1766 
1767         max_queue_pairs = (__virtio16 *)&config->buf;
1768 
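        /* Config space fields are little-endian for VERSION_1 devices. */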
1769         return lduw_le_p(max_queue_pairs);
1770     }
1771 
1772     return 1;
1773 }
1774 
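/*
 * Entry point for the vhost-vdpa netdev backend. A typical (illustrative)
 * invocation:
 *
 *   -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *   -device virtio-net-pci,netdev=vdpa0
 *
 * One NetClientState is created per data queue pair, plus one for CVQ when
 * the device offers VIRTIO_NET_F_CTRL_VQ.
 */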
1775 int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1776                         NetClientState *peer, Error **errp)
1777 {
1778     ERRP_GUARD();
1779     const NetdevVhostVDPAOptions *opts;
1780     uint64_t features;
1781     int vdpa_device_fd;
1782     g_autofree NetClientState **ncs = NULL;
1783     struct vhost_vdpa_iova_range iova_range;
1784     NetClientState *nc;
1785     int queue_pairs, r, i = 0, has_cvq = 0;
1786 
1787     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1788     opts = &netdev->u.vhost_vdpa;
1789     if (!opts->vhostdev && !opts->vhostfd) {
1790         error_setg(errp,
1791                    "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1792         return -1;
1793     }
1794 
1795     if (opts->vhostdev && opts->vhostfd) {
1796         error_setg(errp,
1797                    "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1798         return -1;
1799     }
1800 
1801     if (opts->vhostdev) {
1802         vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1803         if (vdpa_device_fd == -1) {
1804             return -errno;
1805         }
1806     } else {
1807         /* has_vhostfd */
1808         vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1809         if (vdpa_device_fd == -1) {
1810             error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1811             return -1;
1812         }
1813     }
1814 
1815     r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1816     if (unlikely(r < 0)) {
1817         goto err;
1818     }
1819 
1820     queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1821                                                  &has_cvq, errp);
1822     if (queue_pairs < 0) {
1823         qemu_close(vdpa_device_fd);
1824         return queue_pairs;
1825     }
1826 
1827     r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1828     if (unlikely(r < 0)) {
1829         error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1830                    strerror(-r));
1831         goto err;
1832     }
1833 
1834     if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1835         goto err;
1836     }
1837 
1838     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
1839 
1840     for (i = 0; i < queue_pairs; i++) {
1841         VhostVDPAShared *shared = NULL;
1842 
1843         if (i) {
1844             shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
1845         }
1846         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1847                                      vdpa_device_fd, i, 2, true, opts->x_svq,
1848                                      iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
1851     }
1852 
1853     if (has_cvq) {
1854         VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
1855         VhostVDPAShared *shared = s0->vhost_vdpa.shared;
1856 
1857         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1858                                  vdpa_device_fd, i, 1, false,
1859                                  opts->x_svq, iova_range, features, shared,
1860                                  errp);
        if (!nc) {
            goto err;
        }
1863     }
1864 
1865     return 0;
1866 
1867 err:
1868     if (i) {
1869         for (i--; i >= 0; i--) {
1870             qemu_del_net_client(ncs[i]);
1871         }
1872     }
1873 
1874     qemu_close(vdpa_device_fd);
1875 
1876     return -1;
1877 }
1878