xref: /openbmc/qemu/net/vhost-vdpa.c (revision 2deec9ab7d25d7cd8f57033bd0421c1f9f28d905)
/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Device-specific feature bits supported with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

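/*
 * Address space id used for the shadow CVQ. When the device can isolate
 * the CVQ group (see s->cvq_isolated), keeping the CVQ in its own ASID
 * lets QEMU map its control shadow buffers without touching the
 * guest-memory mappings used by the data virtqueues.
 */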
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

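/*
 * Sizing note (a rough worked example, assuming the usual QEMU values of
 * MAC_TABLE_ENTRIES == 64 and ETH_ALEN == 6): MAC_TABLE_SET carries one
 * 2-byte header, two 4-byte virtio_net_ctrl_mac counters and up to
 * 64 * 6 bytes of addresses, i.e. 2 + 2 * 4 + 384 = 394 bytes, which
 * vhost_vdpa_net_cvq_cmd_page_len() below rounds up to one host page.
 */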
static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits regardless.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
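    /*
     * The shared vhost_vdpa state (device fd, IOVA tree) is owned by the
     * net client of queue pair 0; all other clients must leave it alone.
     */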
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_get_vnet_hash_supported_types(NetClientState *nc,
                                                     uint32_t *types)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = s->vhost_vdpa.dev->features;
    int fd = s->vhost_vdpa.shared->device_fd;
    struct {
        struct vhost_vdpa_config hdr;
        uint32_t supported_hash_types;
    } config;

    if (!virtio_has_feature(features, VIRTIO_NET_F_HASH_REPORT) &&
        !virtio_has_feature(features, VIRTIO_NET_F_RSS)) {
        return false;
    }

    config.hdr.off = offsetof(struct virtio_net_config, supported_hash_types);
    config.hdr.len = sizeof(config.supported_hash_types);

    assert(!ioctl(fd, VHOST_VDPA_GET_CONFIG, &config));
    *types = le32_to_cpu(config.supported_hash_types);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

/*
 * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
 * reasonable to assume that h/w is LE by default, because LE is what
 * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
 * LE". Otherwise, on a BE machine, higher-level code would mistakenly think
 * the h/w is BE and can't support VDPA for a virtio 1.0 client.
 */
static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
{
    return 0;
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case QEMU falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
        SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to decide whether to use SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
}

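/*
 * Migration and SVQ interplay: vhost-vdpa devices are not required to
 * offer VHOST_F_LOG_ALL, but the shadow virtqueues do offer it (see
 * vdpa_svq_device_features above). When precopy setup starts, the
 * notifier below restarts the device in SVQ mode so QEMU can track dirty
 * memory; if the migration fails, it switches back.
 */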
static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    v->shadow_vqs_enabled = s->always_svq || migration_is_running();

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
    }

    return 0;
}

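/*
 * An odd vq_index_end means the device exposes a control virtqueue after
 * the data queue pairs. In that case the rings are made ready by the CVQ
 * net client (see vhost_vdpa_net_cvq_load) instead, once the control
 * state has been restored.
 */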
static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }
}

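/*
 * NetClientInfo for the data virtqueues; the control virtqueue uses
 * net_vhost_vdpa_cvq_info further below.
 */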
static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .get_vnet_hash_supported_types = vhost_vdpa_get_vnet_hash_supported_types,
    .has_ufo = vhost_vdpa_has_ufo,
    .set_vnet_le = vhost_vdpa_set_vnet_le,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

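/*
 * vDPA gathers virtqueues that share an IOMMU domain into groups, and an
 * address space id can be assigned per group with
 * VHOST_VDPA_SET_GROUP_ASID. The two helpers below query a vq's group and
 * move a group to another ASID, which is what allows the CVQ to be
 * isolated from the guest's data path.
 */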
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    hwaddr taddr = (hwaddr)(uintptr_t)buf;
    int r;

    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");

        if (map.translated_addr == taddr) {
            error_report("Insertion to IOVA->HVA tree failed");
            /* Remove the mapping from the IOVA-only tree */
            goto dma_map_err;
        }
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

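/*
 * Start the control virtqueue client: decide whether CVQ runs in SVQ mode
 * and map its shadow buffers. If all virtqueues are already shadowed
 * (shadow_data), nothing extra is needed; otherwise CVQ is shadowed on
 * its own only when the device features are valid for SVQ and the CVQ
 * group can be isolated in VHOST_VDPA_NET_CVQ_ASID. The early returns
 * leave SVQ disabled, keeping migration blocked for backends that do not
 * offer _F_LOG.
 */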
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases SVQ will not be enabled. The
     * migration will be blocked as long as the vhost-vdpa backend does
     * not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg,
                                      size_t out_num,
                                      const struct iovec *in_sg,
                                      size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and must consume
 * the answer before releasing it, since SVQ polls on its own once the BQL
 * is dropped.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

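/*
 * Queue one control command into the shadow buffers. Commands are packed
 * back to back in the out buffer, with one status byte reserved per
 * command in the in buffer; the cursors track the free space in both.
 * Everything queued so far is flushed first if either the SVQ or the
 * shadow buffers run out of room.
 */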
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or the control command shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* advance the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

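    /*
     * VIRTIO_NET_CTRL_MAC_TABLE_SET wire layout, matching the iovec array
     * built below:
     *
     *   struct virtio_net_ctrl_mac uni;  (le32 count of unicast entries)
     *   uint8_t uni_macs[uni_entries][ETH_ALEN];
     *   struct virtio_net_ctrl_mac mul;  (le32 count of multicast entries)
     *   uint8_t mul_macs[mul_entries][ETH_ALEN];
     *
     * n->mac_table.macs stores the unicast entries first, so first_multi
     * is both the unicast count and the offset of the multicast entries.
     */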
    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to the VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to the VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to the VirtIO standard, "Field reserved MUST contain
         * zeroes. It is defined to make the structure to match the layout
         * of virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which correspond to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * virtio_net_handle_rss() currently does not restore into n->rss_data
     * the hash key length parsed from the guest's CVQ command, and other
     * code uses the maximum key length, so we also employ the maximum key
     * length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * since the device's default of promiscuous mode on then differs from
     * the driver's configuration.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, different from the device's
     * default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

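    /*
     * n->vlans is a bitmap of MAX_VLAN bits stored as 32-bit words: word
     * i, bit j stands for VLAN id (i << 5) + j. Replay a VLAN_ADD for
     * every bit the guest has set.
     */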
1229     for (int i = 0; i < MAX_VLAN >> 5; i++) {
1230         for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
1231             if (n->vlans[i] & (1U << j)) {
1232                 r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
1233                                                     in_cursor, (i << 5) + j);
1234                 if (unlikely(r != 0)) {
1235                     return r;
1236                 }
1237             }
1238         }
1239     }
1240 
1241     return 0;
1242 }
1243 
1244 static int vhost_vdpa_net_cvq_load(NetClientState *nc)
1245 {
1246     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
1247     struct vhost_vdpa *v = &s->vhost_vdpa;
1248     const VirtIONet *n;
1249     int r;
1250     struct iovec out_cursor, in_cursor;
1251 
1252     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1253 
1254     r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
1255     if (unlikely(r < 0)) {
1256         return r;
1257     }
1258 
1259     if (v->shadow_vqs_enabled) {
1260         n = VIRTIO_NET(v->dev->vdev);
1261         vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
1262         r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
1263         if (unlikely(r < 0)) {
1264             return r;
1265         }
1266         r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
1267         if (unlikely(r)) {
1268             return r;
1269         }
1270         r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
1271         if (unlikely(r)) {
1272             return r;
1273         }
1274         r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
1275         if (unlikely(r)) {
1276             return r;
1277         }
1278         r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
1279         if (unlikely(r)) {
1280             return r;
1281         }
1282 
1283         /*
1284          * We need to poll and check all pending device's used buffers.
1285          *
1286          * We can poll here since we've had BQL from the time
1287          * we sent the descriptor.
1288          */
1289         r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
1290         if (unlikely(r)) {
1291             return r;
1292         }
1293     }
1294 
1295     for (int i = 0; i < v->dev->vq_index; ++i) {
1296         r = vhost_vdpa_set_vring_ready(v, i);
1297         if (unlikely(r < 0)) {
1298             return r;
1299         }
1300     }
1301 
1302     return 0;
1303 }
1304 
1305 static NetClientInfo net_vhost_vdpa_cvq_info = {
1306     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
1307     .size = sizeof(VhostVDPAState),
1308     .receive = vhost_vdpa_receive,
1309     .start = vhost_vdpa_net_cvq_start,
1310     .load = vhost_vdpa_net_cvq_load,
1311     .stop = vhost_vdpa_net_cvq_stop,
1312     .cleanup = vhost_vdpa_cleanup,
1313     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
1314     .get_vnet_hash_supported_types = vhost_vdpa_get_vnet_hash_supported_types,
1315     .has_ufo = vhost_vdpa_has_ufo,
1316     .check_peer_type = vhost_vdpa_check_peer_type,
1317     .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
1318 };
1319 
1320 /*
1321  * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
1322  * vdpa device.
1323  *
1324  * Considering that QEMU cannot send the entire filter table to the
1325  * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
1326  * command to enable promiscuous mode to receive all packets,
1327  * according to VirtIO standard, "Since there are no guarantees,
1328  * it can use a hash filter or silently switch to allmulti or
1329  * promiscuous mode if it is given too many addresses.".
1330  *
1331  * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
1332  * marks `n->mac_table.x_overflow` accordingly, it should have
1333  * the same effect on the device model to receive
1334  * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
1335  * The same applies to multicast MAC addresses.
1336  *
1337  * Therefore, QEMU can provide the device model with a fake
1338  * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
1339  * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1340  * MAC addresses. This ensures that the device model marks
1341  * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1342  * allowing all packets to be received, which aligns with the
1343  * state of the vdpa device.
1344  */
1345 static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
1346                                                        VirtQueueElement *elem,
1347                                                        struct iovec *out,
1348                                                        const struct iovec *in)
1349 {
1350     struct virtio_net_ctrl_mac mac_data, *mac_ptr;
1351     struct virtio_net_ctrl_hdr *hdr_ptr;
1352     uint32_t cursor;
1353     ssize_t r;
1354     uint8_t on = 1;
1355 
1356     /* parse the non-multicast MAC address entries from CVQ command */
1357     cursor = sizeof(*hdr_ptr);
1358     r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1359                    &mac_data, sizeof(mac_data));
1360     if (unlikely(r != sizeof(mac_data))) {
1361         /*
1362          * If the CVQ command is invalid, we should simulate the vdpa device
1363          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1364          */
1365         *s->status = VIRTIO_NET_ERR;
1366         return sizeof(*s->status);
1367     }
1368     cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1369 
1370     /* parse the multicast MAC address entries from CVQ command */
1371     r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1372                    &mac_data, sizeof(mac_data));
1373     if (r != sizeof(mac_data)) {
1374         /*
1375          * If the CVQ command is invalid, we should simulate the vdpa device
1376          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1377          */
1378         *s->status = VIRTIO_NET_ERR;
1379         return sizeof(*s->status);
1380     }
1381     cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1382 
1383     /* validate the CVQ command */
1384     if (iov_size(elem->out_sg, elem->out_num) != cursor) {
1385         /*
1386          * If the CVQ command is invalid, we should simulate the vdpa device
1387          * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1388          */
1389         *s->status = VIRTIO_NET_ERR;
1390         return sizeof(*s->status);
1391     }
1392 
1393     /*
1394      * According to VirtIO standard, "Since there are no guarantees,
1395      * it can use a hash filter or silently switch to allmulti or
1396      * promiscuous mode if it is given too many addresses.".
1397      *
1398      * Therefore, considering that QEMU is unable to send the entire
1399      * filter table to the vdpa device, it should send the
1400      * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
1401      */
1402     hdr_ptr = out->iov_base;
1403     out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
1404 
1405     hdr_ptr->class = VIRTIO_NET_CTRL_RX;
1406     hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
1407     iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
1408     r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
1409     if (unlikely(r < 0)) {
1410         return r;
1411     }
1412 
1413     /*
1414      * We can poll here since we've had BQL from the time
1415      * we sent the descriptor.
1416      */
1417     r = vhost_vdpa_net_svq_poll(s, 1);
1418     if (unlikely(r < sizeof(*s->status))) {
1419         return r;
1420     }
1421     if (*s->status != VIRTIO_NET_OK) {
1422         return sizeof(*s->status);
1423     }
1424 
1425     /*
1426      * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
1427      * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
1428      * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
1429      * multicast MAC addresses.
1430      *
1431      * By doing so, the device model can mark `n->mac_table.uni_overflow`
1432      * and `n->mac_table.multi_overflow`, enabling all packets to be
1433      * received, which aligns with the state of the vdpa device.
1434      */
1435     cursor = 0;
1436     uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
1437              fake_mul_entries = MAC_TABLE_ENTRIES + 1,
1438              fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
1439                              sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
1440                              sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
1441 
1442     assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
1443     out->iov_len = fake_cvq_size;
1444 
1445     /* pack the header for fake CVQ command */
1446     hdr_ptr = out->iov_base + cursor;
1447     hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
1448     hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1449     cursor += sizeof(*hdr_ptr);
1450 
1451     /*
1452      * Pack the non-multicast MAC addresses part for fake CVQ command.
1453      *
1454      * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1455      * addresses provided in CVQ command. Therefore, only the entries
1456      * field need to be prepared in the CVQ command.
1457      */
1458     mac_ptr = out->iov_base + cursor;
1459     mac_ptr->entries = cpu_to_le32(fake_uni_entries);
1460     cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
1461 
1462     /*
1463      * Pack the multicast MAC addresses part for fake CVQ command.
1464      *
1465      * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1466      * addresses provided in CVQ command. Therefore, only the entries
1467      * field need to be prepared in the CVQ command.
1468      */
1469     mac_ptr = out->iov_base + cursor;
1470     mac_ptr->entries = cpu_to_le32(fake_mul_entries);
1471 
1472     /*
1473      * Simulating QEMU poll a vdpa device used buffer
1474      * for VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1475      */
1476     return sizeof(*s->status);
1477 }
1478 
1479 /**
1480  * Validate and copy control virtqueue commands.
1481  *
1482  * Following QEMU guidelines, we offer a copy of the buffers to the device to
1483  * prevent TOCTOU bugs.
1484  */
1485 static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1486                                             VirtQueueElement *elem,
1487                                             void *opaque)
1488 {
1489     VhostVDPAState *s = opaque;
1490     size_t in_len;
1491     const struct virtio_net_ctrl_hdr *ctrl;
1492     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1493     /* Out buffer sent to both the vdpa device and the device model */
1494     struct iovec out = {
1495         .iov_base = s->cvq_cmd_out_buffer,
1496     };
1497     /* in buffer used for device model */
1498     const struct iovec model_in = {
1499         .iov_base = &status,
1500         .iov_len = sizeof(status),
1501     };
1502     /* in buffer used for vdpa device */
1503     const struct iovec vdpa_in = {
1504         .iov_base = s->status,
1505         .iov_len = sizeof(*s->status),
1506     };
1507     ssize_t dev_written = -EINVAL;
1508 
1509     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1510                              s->cvq_cmd_out_buffer,
1511                              vhost_vdpa_net_cvq_cmd_page_len());
1512 
1513     ctrl = s->cvq_cmd_out_buffer;
1514     if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1515         /*
1516          * Guest announce capability is emulated by qemu, so don't forward to
1517          * the device.
1518          */
1519         dev_written = sizeof(status);
1520         *s->status = VIRTIO_NET_OK;
1521     } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1522                         ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1523                         iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1524         /*
1525          * Due to the size limitation of the out buffer sent to the vdpa device,
1526          * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1527          * MAC addresses set by the driver for the filter table can cause
1528          * truncation of the CVQ command in QEMU. As a result, the vdpa device
1529          * rejects the flawed CVQ command.
1530          *
1531          * Therefore, QEMU must handle this situation instead of sending
1532          * the CVQ command directly.
1533          */
1534         dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1535                                                             &out, &vdpa_in);
1536         if (unlikely(dev_written < 0)) {
1537             goto out;
1538         }
1539     } else {
1540         ssize_t r;
1541         r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
1542         if (unlikely(r < 0)) {
1543             dev_written = r;
1544             goto out;
1545         }
1546 
1547         /*
1548          * We can poll here since we've held the BQL from the time
1549          * we sent the descriptor.
1550          */
1551         dev_written = vhost_vdpa_net_svq_poll(s, 1);
1552     }
1553 
1554     if (unlikely(dev_written < sizeof(status))) {
1555         error_report("Insufficient written data (%zd)", dev_written);
1556         goto out;
1557     }
1558 
1559     if (*s->status != VIRTIO_NET_OK) {
1560         goto out;
1561     }
1562 
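     /*
      * Replay the command on QEMU's device model so it stays in sync with
      * the device.  virtio_net_handle_ctrl_iov() writes its ack through
      * model_in, i.e. into the local `status`.
      */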
1563     status = VIRTIO_NET_ERR;
1564     virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
1565     if (status != VIRTIO_NET_OK) {
1566         error_report("Bad CVQ processing in model");
1567     }
1568 
1569 out:
1570     in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1571                           sizeof(status));
1572     if (unlikely(in_len < sizeof(status))) {
1573         error_report("Bad device CVQ written length");
1574     }
1575     vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
1576     /*
1577      * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
1578      * the function successfully forwards the CVQ command, indicated
1579      * by a non-negative value of `dev_written`. Otherwise, it still
1580      * belongs to SVQ.
1581      * This function should only free `elem` when it owns it.
1582      */
1583     if (dev_written >= 0) {
1584         g_free(elem);
1585     }
1586     return dev_written < 0 ? dev_written : 0;
1587 }
1588 
1589 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1590     .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1591 };
1592 
1593 /**
1594  * Probe if CVQ is isolated
1595  *
1596  * @device_fd         The vdpa device fd
1597  * @features          Features offered by the device
1598  * @cvq_index         The control vq pair index
      * @errp              Error object, set on failure
1599  *
1600  * Returns <0 in case of failure, 0 if false and 1 if true.
1601  */
1602 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1603                                           int cvq_index, Error **errp)
1604 {
1605     ERRP_GUARD();
1606     uint64_t backend_features;
1607     int64_t cvq_group;
1608     uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1609                      VIRTIO_CONFIG_S_DRIVER;
1610     int r;
1611 
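     /*
      * Probe by taking the device through the minimal initialization
      * sequence (ACKNOWLEDGE | DRIVER, set features, FEATURES_OK) needed
      * to query vring groups, and reset it again before returning.
      */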
1612     r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1613     if (unlikely(r < 0)) {
1614         error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1615         return r;
1616     }
1617 
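     /*
      * Without VHOST_BACKEND_F_IOTLB_ASID the device exposes a single
      * address space, so CVQ can never be isolated in its own ASID.
      */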
1618     if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1619         return 0;
1620     }
1621 
1622     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1623     if (unlikely(r)) {
1624         error_setg_errno(errp, errno, "Cannot set device status");
1625         goto out;
1626     }
1627 
1628     r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1629     if (unlikely(r)) {
1630         error_setg_errno(errp, errno, "Cannot set features");
1631         goto out;
1632     }
1633 
1634     status |= VIRTIO_CONFIG_S_FEATURES_OK;
1635     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1636     if (unlikely(r)) {
1637         error_setg_errno(errp, errno, "Cannot set device status");
1638         goto out;
1639     }
1640 
1641     cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1642     if (unlikely(cvq_group < 0)) {
1643         if (cvq_group != -ENOTSUP) {
1644             r = cvq_group;
1645             goto out;
1646         }
1647 
1648         /*
1649          * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
1650          * supports ASID even if the parent driver does not. The CVQ cannot be
1651          * isolated in this case.
1652          */
1653         error_free(*errp);
1654         *errp = NULL;
1655         r = 0;
1656         goto out;
1657     }
1658 
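     /*
      * CVQ is isolated only if none of the data virtqueues
      * (indexes 0 .. cvq_index - 1) shares its vring group with CVQ.
      */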
1659     for (int i = 0; i < cvq_index; ++i) {
1660         int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1661         if (unlikely(group < 0)) {
1662             r = group;
1663             goto out;
1664         }
1665 
1666         if (group == cvq_group) {
1667             r = 0;
1668             goto out;
1669         }
1670     }
1671 
1672     r = 1;
1673 
1674 out:
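     /* Reset the device so the probe leaves no state behind */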
1675     status = 0;
1676     ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1677     return r;
1678 }
1679 
1680 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1681                                        const char *device,
1682                                        const char *name,
1683                                        int vdpa_device_fd,
1684                                        int queue_pair_index,
1685                                        int nvqs,
1686                                        bool is_datapath,
1687                                        bool svq,
1688                                        struct vhost_vdpa_iova_range iova_range,
1689                                        uint64_t features,
1690                                        VhostVDPAShared *shared,
1691                                        Error **errp)
1692 {
1693     NetClientState *nc = NULL;
1694     VhostVDPAState *s;
1695     int ret = 0;
1696     int cvq_isolated = 0;
1697     assert(name);
1698 
1699     if (is_datapath) {
1700         nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1701                                  name);
1702     } else {
1703         cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1704                                                       queue_pair_index * 2,
1705                                                       errp);
1706         if (unlikely(cvq_isolated < 0)) {
1707             return NULL;
1708         }
1709 
1710         nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1711                                          device, name);
1712     }
1713     qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1714     s = DO_UPCAST(VhostVDPAState, nc, nc);
1715 
1716     s->vhost_vdpa.index = queue_pair_index;
1717     s->always_svq = svq;
1718     s->migration_state.notify = NULL;
1719     s->vhost_vdpa.shadow_vqs_enabled = svq;
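     /*
      * Only the first queue pair allocates the state shared by every
      * vhost_vdpa instance of this device (device fd, IOVA range and
      * tree); the remaining queue pairs, including CVQ, reuse it below.
      */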
1720     if (queue_pair_index == 0) {
1721         vhost_vdpa_net_valid_svq_features(features,
1722                                           &s->vhost_vdpa.migration_blocker);
1723         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
1724         s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
1725         s->vhost_vdpa.shared->iova_range = iova_range;
1726         s->vhost_vdpa.shared->shadow_data = svq;
1727         s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first,
1728                                                               iova_range.last);
1729     } else if (!is_datapath) {
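         /*
          * The CVQ shadow buffers are backed by whole pages so they can be
          * DMA-mapped into the device's address space at the granularity of
          * vhost_vdpa_net_cvq_cmd_page_len().
          */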
1730         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1731                                      PROT_READ | PROT_WRITE,
1732                                      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1733         s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1734                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1735                          -1, 0);
1736 
1737         s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1738         s->vhost_vdpa.shadow_vq_ops_opaque = s;
1739         s->cvq_isolated = cvq_isolated;
1740     }
1741     if (queue_pair_index != 0) {
1742         s->vhost_vdpa.shared = shared;
1743     }
1744 
1745     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1746     if (ret) {
1747         qemu_del_net_client(nc);
1748         return NULL;
1749     }
1750 
1751     return nc;
1752 }
1753 
1754 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1755 {
1756     int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1757     if (unlikely(ret < 0)) {
1758         error_setg_errno(errp, errno,
1759                          "Failed to query features from vhost-vDPA device");
1760     }
1761     return ret;
1762 }
1763 
1764 static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1765                                           int *has_cvq, Error **errp)
1766 {
1767     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1768     g_autofree struct vhost_vdpa_config *config = NULL;
1769     __virtio16 *max_queue_pairs;
1770     int ret;
1771 
1772     if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
1773         *has_cvq = 1;
1774     } else {
1775         *has_cvq = 0;
1776     }
1777 
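     /*
      * With VIRTIO_NET_F_MQ the device advertises its limit in the
      * max_virtqueue_pairs config field; without it, virtio-net has
      * exactly one queue pair.
      */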
1778     if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
1779         config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1780         config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1781         config->len = sizeof(*max_queue_pairs);
1782 
1783         ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
1784         if (ret) {
1785             error_setg(errp, "Failed to get config from vhost-vDPA device");
1786             return -errno;
1787         }
1788 
1789         max_queue_pairs = (__virtio16 *)&config->buf;
1790 
1791         return lduw_le_p(max_queue_pairs);
1792     }
1793 
1794     return 1;
1795 }
1796 
1797 int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1798                         NetClientState *peer, Error **errp)
1799 {
1800     ERRP_GUARD();
1801     const NetdevVhostVDPAOptions *opts;
1802     uint64_t features;
1803     int vdpa_device_fd;
1804     g_autofree NetClientState **ncs = NULL;
1805     struct vhost_vdpa_iova_range iova_range;
1806     NetClientState *nc;
1807     int queue_pairs, r, i = 0, has_cvq = 0;
1808 
1809     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1810     opts = &netdev->u.vhost_vdpa;
1811     if (!opts->vhostdev && !opts->vhostfd) {
1812         error_setg(errp,
1813                    "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1814         return -1;
1815     }
1816 
1817     if (opts->vhostdev && opts->vhostfd) {
1818         error_setg(errp,
1819                    "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1820         return -1;
1821     }
1822 
1823     if (opts->vhostdev) {
1824         vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1825         if (vdpa_device_fd == -1) {
1826             return -errno;
1827         }
1828     } else {
1829         /* has_vhostfd */
1830         vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1831         if (vdpa_device_fd == -1) {
1832             error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1833             return -1;
1834         }
1835     }
1836 
1837     r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1838     if (unlikely(r < 0)) {
1839         goto err;
1840     }
1841 
1842     queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1843                                                  &has_cvq, errp);
1844     if (queue_pairs < 0) {
1845         qemu_close(vdpa_device_fd);
1846         return queue_pairs;
1847     }
1848 
1849     r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1850     if (unlikely(r < 0)) {
1851         error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1852                    strerror(-r));
1853         goto err;
1854     }
1855 
1856     if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1857         goto err;
1858     }
1859 
1860     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
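     /* One net client per data queue pair; CVQ gets its own client below */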
1861 
1862     for (i = 0; i < queue_pairs; i++) {
1863         VhostVDPAShared *shared = NULL;
1864 
1865         if (i) {
1866             shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
1867         }
1868         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1869                                      vdpa_device_fd, i, 2, true, opts->x_svq,
1870                                      iova_range, features, shared, errp);
1871         if (!ncs[i]) {
1872             goto err;
             }
1873     }
1874 
1875     if (has_cvq) {
1876         VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
1877         VhostVDPAShared *shared = s0->vhost_vdpa.shared;
1878 
1879         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1880                                  vdpa_device_fd, i, 1, false,
1881                                  opts->x_svq, iova_range, features, shared,
1882                                  errp);
1883         if (!nc) {
1884             goto err;
             }
1885     }
1886 
1887     return 0;
1888 
1889 err:
1890     if (i) {
1891         for (i--; i >= 0; i--) {
1892             qemu_del_net_client(ncs[i]);
1893         }
1894     }
1895 
1896     qemu_close(vdpa_device_fd);
1897 
1898     return -1;
1899 }
1900