/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;
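
/*
 * Note: cvq_cmd_out_buffer and status above are the page-aligned shadow
 * buffers that net_vhost_vdpa_init() allocates with mmap() for the control
 * virtqueue client; vhost_vdpa_net_cvq_start() maps them into the device's
 * address space before any CVQ command is sent through them.
 */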

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

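/*
 * vhost-vdpa devices can attach virtqueue groups to different address space
 * identifiers (ASIDs).  Data virtqueues stay in the default
 * VHOST_VDPA_GUEST_PA_ASID; when the device can isolate CVQ in a group of
 * its own, the CVQ is moved to the ASID below so its shadow buffers can be
 * mapped without touching the guest's memory mappings.
 */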
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here as well.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
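
/*
 * For reference, the worst case sized above is the wire layout of
 * VIRTIO_NET_CTRL_MAC_TABLE_SET:
 *
 *   struct virtio_net_ctrl_hdr hdr;     (class / cmd)
 *   struct virtio_net_ctrl_mac uni;     (unicast entry count)
 *   uint8_t uni_macs[][ETH_ALEN];
 *   struct virtio_net_ctrl_mac mul;     (multicast entry count)
 *   uint8_t mul_macs[][ETH_ALEN];
 *
 * with at most MAC_TABLE_ENTRIES addresses across the two tables.
 */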

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* The start path checks migration setup_or_active to decide on SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}
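
/*
 * Dynamic SVQ switching for migration: without x-svq the data virtqueues run
 * in passthrough mode, which provides no dirty page tracking.  The notifier
 * above restarts vhost-net when migration enters setup so the virtqueues are
 * shadowed and SVQ can expose VHOST_F_LOG_ALL on the device's behalf; if
 * migration fails, it restarts them once more in passthrough mode.
 */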

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}
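
/*
 * A vring group is the granularity at which the vhost-vdpa device lets an
 * ASID be assigned: every virtqueue in the same group shares the same
 * address space.  CVQ can therefore only get its own mappings when the
 * device places it in a group of its own (see
 * vhost_vdpa_probe_cvq_isolation()).
 */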

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   collisions between them are not a concern.  Guest's translations
         *   are still validated with virtio virtqueue_pop so there is no risk
         *   of the guest accessing memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable, but it complicates the
         * code and is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}
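
/*
 * CVQ commands are thus processed synchronously: vhost_svq_poll() waits for
 * the device to mark the shadow buffer as used before returning.  That
 * should be acceptable here since control commands are comparatively rare
 * and are only issued while the BQL is held.
 */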

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset."
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                VIRTIO_NET_CTRL_MAC,
                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility."
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses."
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * i.e. the driver wants promiscuous mode off, which differs from the
     * device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses." QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * i.e. the driver wants all-multicast mode on, which differs from the
     * device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_rx(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}
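
/*
 * The .load callback above runs when the vhost device is started and replays
 * the virtio-net device model state (MAC and MAC filter table, queue pairs,
 * guest offloads, rx mode) into the device through CVQ.  This is what lets a
 * migration destination reach the same control-plane state as the source
 * before the datapath resumes.
 */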

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses."
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses."
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part of the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part of the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a used buffer from the vdpa device for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 * @errp              Error pointer, filled in on failure
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       uint64_t features,
                                       Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

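/*
 * Entry point for the vhost-vdpa netdev backend.  An illustrative command
 * line (the device node and id are examples):
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *   -device virtio-net-pci,netdev=vdpa0
 *
 * Alternatively, an already-opened descriptor can be passed via vhostfd=.
 */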
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}