/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
static const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

static struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

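/*
 * Maximum size of a control command out buffer; used to size the shadow
 * buffers shared with the device.
 */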
static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it is guaranteed to fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

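/*
 * Check that the device features are a valid combination for SVQ: all
 * device-specific features must be in vdpa_svq_device_features, and the
 * generic SVQ transport checks must pass as well.
 */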
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

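/** Check that the virtio device backing @net is a network device */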
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* Don't inspect device_id if the ioctl failed */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

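/*
 * Initialize a vhost_net instance for the vdpa backend and verify that it
 * is backed by a virtio-net device. Returns 0 on success, -1 on error.
 */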
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;
    options.feature_bits = vdpa_feature_bits;
    options.get_acked_features = NULL;
    options.save_acked_features = NULL;
    options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;
    options.is_vhost_user = false;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

/*
 * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
 * reasonable to assume that h/w is LE by default, because LE is what
 * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
 * LE". Otherwise, on a BE machine, higher-level code would mistakenly think
 * the h/w is BE and can't support VDPA for a virtio 1.0 client.
 */
static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
{
    return 0;
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

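/*
 * Restart the vhost-net device so that SVQ (and with it dirty page
 * logging) is enabled or disabled according to @enable. Called when
 * migration starts or fails and x-svq is not set.
 */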
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
        SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * Start will check migration setup_or_active to decide whether to
     * configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
}

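/* Enable SVQ when precopy migration is set up, and roll back if it fails */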
static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq || migration_is_running()) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .set_vnet_le = vhost_vdpa_set_vnet_le,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
    .get_vhost_net = vhost_vdpa_get_vhost_net,
};

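/** Return the group this vq belongs to, or a negative errno on failure */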
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

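/** Assign the virtqueue group @vq_group to the address space @asid_num */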
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

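/** Unmap a CVQ shadow buffer previously mapped by vhost_vdpa_cvq_map_buf */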
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    hwaddr taddr = (hwaddr)(uintptr_t)buf;
    int r;

    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");

        if (map.translated_addr == taddr) {
            error_report("Insertion to IOVA->HVA tree failed");
            /* Remove the mapping from the IOVA-only tree */
            goto dma_map_err;
        }
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

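/*
 * Start the control virtqueue client: if the device can isolate CVQ in
 * its own group, move it to an independent ASID and shadow it, then map
 * the command and status shadow buffers into the device.
 */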
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as vhost-vdpa backends do not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

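/*
 * Enqueue one control command on the shadow CVQ without waiting for its
 * completion; the caller polls for the device's answer later.
 */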
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                    const struct iovec *out_sg, size_t out_num,
                                    const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * The caller should hold the BQL when invoking this function, and should
 * take the answer before SVQ pulls by itself once the BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * The caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

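/*
 * Enqueue one control command built from @class, @cmd and @data_sg,
 * flushing all in-flight commands first if SVQ or the shadow buffers
 * are full. The cursors are advanced past the buffers just used.
 */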
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or the control commands' shadow buffers are full.
         *
         * We can poll here since we've held the BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

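/*
 * Restore the MAC address and, if VIRTIO_NET_F_CTRL_RX is negotiated and
 * the table is not empty, the MAC filter table of the destination device.
 */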
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

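/*
 * Restore the receive-side scaling state (@do_rss true) or the hash
 * calculation state (@do_rss false) of the destination device.
 */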
static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to the VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to the VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to the VirtIO standard, "Field reserved MUST contain
         * zeroes. It is defined to make the structure to match the layout
         * of virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which correspond to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, we
     * also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * in which case the command turns promiscuous mode off, different
     * from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

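/*
 * Start the CVQ and, when shadowed, replay the device state (MAC, MQ,
 * offloads, rx mode and VLAN filters) before enabling the data vqs.
 */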
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
    if (unlikely(r < 0)) {
        return r;
    }

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all of the device's pending used buffers.
         *
         * We can poll here since we've held the BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        r = vhost_vdpa_set_vring_ready(v, i);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
    .get_vhost_net = vhost_vdpa_get_vhost_net,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device's used buffer for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

1458 /**
1459  * Validate and copy control virtqueue commands.
1460  *
1461  * Following QEMU guidelines, we offer a copy of the buffers to the device to
1462  * prevent TOCTOU bugs.
1463  */
1464 static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1465                                             VirtQueueElement *elem,
1466                                             void *opaque)
1467 {
1468     VhostVDPAState *s = opaque;
1469     size_t in_len;
1470     const struct virtio_net_ctrl_hdr *ctrl;
1471     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1472     /* Out buffer sent to both the vdpa device and the device model */
1473     struct iovec out = {
1474         .iov_base = s->cvq_cmd_out_buffer,
1475     };
1476     /* in buffer used for device model */
1477     const struct iovec model_in = {
1478         .iov_base = &status,
1479         .iov_len = sizeof(status),
1480     };
1481     /* in buffer used for vdpa device */
1482     const struct iovec vdpa_in = {
1483         .iov_base = s->status,
1484         .iov_len = sizeof(*s->status),
1485     };
1486     ssize_t dev_written = -EINVAL;
1487 
1488     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1489                              s->cvq_cmd_out_buffer,
1490                              vhost_vdpa_net_cvq_cmd_page_len());
1491 
1492     ctrl = s->cvq_cmd_out_buffer;
1493     if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1494         /*
1495          * Guest announce capability is emulated by qemu, so don't forward to
1496          * the device.
1497          */
1498         dev_written = sizeof(status);
1499         *s->status = VIRTIO_NET_OK;
1500     } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1501                         ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1502                         iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1503         /*
1504          * Due to the size limitation of the out buffer sent to the vdpa device,
1505          * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1506          * MAC addresses set by the driver for the filter table can cause
1507          * truncation of the CVQ command in QEMU. As a result, the vdpa device
1508          * rejects the flawed CVQ command.
1509          *
1510          * Therefore, QEMU must handle this situation instead of sending
1511          * the CVQ command directly.
1512          */
1513         dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1514                                                             &out, &vdpa_in);
1515         if (unlikely(dev_written < 0)) {
1516             goto out;
1517         }
1518     } else {
1519         ssize_t r;
1520         r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
1521         if (unlikely(r < 0)) {
1522             dev_written = r;
1523             goto out;
1524         }

        /*
         * We can poll here since we've had the BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

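    /*
     * Replay the command to the device model so that it updates its internal
     * state; the model writes its own ack into the local status through
     * model_in.
     */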
    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if the CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The index of the control virtqueue
 * @errp: Error pointer, set on failure
 *
 * Returns <0 on failure, 0 if the CVQ is not isolated, and 1 if it is.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    ERRP_GUARD();
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

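    /*
     * Negotiate features and move the device to FEATURES_OK before querying
     * the vring groups, so the reported groups match the negotiated
     * feature set.
     */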
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

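    /*
     * The CVQ is isolated only if it sits in its own group: check that no
     * data virtqueue shares a group with it.
     */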
    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
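    /* Reset the device so the caller starts from a clean device status */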
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

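/*
 * Create a net client for one queue pair of the vdpa device.
 *
 * Datapath clients own two virtqueues (@nvqs == 2); the control virtqueue
 * client owns a single one. The VhostVDPAShared state is allocated by queue
 * pair 0 and passed in as @shared by every later queue pair.
 */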
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           VhostVDPAShared *shared,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
        s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
        s->vhost_vdpa.shared->iova_range = iova_range;
        s->vhost_vdpa.shared->shadow_data = svq;
        s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first,
                                                              iova_range.last);
    } else if (!is_datapath) {
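        /*
         * mmap() the CVQ shadow buffers so they are page-aligned and
         * page-sized, matching the granularity of the device DMA mapping.
         */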
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    if (queue_pair_index != 0) {
        s->vhost_vdpa.shared = shared;
    }

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}

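/* Query the device features through VHOST_GET_FEATURES */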
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

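/*
 * Read max_virtqueue_pairs from the device config space when the device
 * offers VIRTIO_NET_F_MQ, and report through @has_cvq whether a control
 * virtqueue is present.
 *
 * Returns the number of queue pairs (at least 1), or a negative value on
 * failure.
 */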
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    ERRP_GUARD();
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

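    /*
     * Create one datapath client per queue pair; every client after the
     * first reuses the VhostVDPAShared state allocated by queue pair 0.
     */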
    for (i = 0; i < queue_pairs; i++) {
        VhostVDPAShared *shared = NULL;

        if (i) {
            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
        }
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;

        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, shared,
                                 errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
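    /* Tear down the net clients created so far and close the device fd */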
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}