/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: need to add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;
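
/*
 * One VhostVDPAState exists per vhost-vdpa net client: one per data queue
 * pair plus, when present, one for the control virtqueue.  The client of
 * the first queue pair (index 0) owns shared state such as the IOVA tree
 * and the migration state notifier (see vhost_vdpa_net_data_start_first()).
 */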

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};
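
/*
 * A note on how this array is consumed (from the generic vhost code, added
 * here for clarity): vhost_get_features() and vhost_ack_features() walk
 * feature-bit arrays like this one until they reach
 * VHOST_INVALID_FEATURE_BIT, which is why the sentinel must stay last.
 */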

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

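/*
 * Data virtqueues stay in the default guest-PA ASID
 * (VHOST_VDPA_GUEST_PA_ASID); ASID 1 is reserved for the isolated control
 * virtqueue mappings (see vhost_vdpa_net_cvq_start() below).
 */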
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
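
/*
 * For reference, the worst case sized above is the
 * VIRTIO_NET_CTRL_MAC_TABLE_SET payload as defined by the VirtIO spec:
 * a control header, a virtio_net_ctrl_mac count plus the unicast entries,
 * then another count plus the multicast entries, with QEMU's device model
 * capping the total number of entries at MAC_TABLE_ENTRIES.
 */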

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* Don't inspect device_id if the call failed: it is uninitialized */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start checks whether migration is in setup/active to decide on SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}
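
/*
 * Together, the two helpers above implement CVQ isolation: the group that
 * backs the control virtqueue is queried with VHOST_VDPA_GET_VRING_GROUP
 * and then moved into VHOST_VDPA_NET_CVQ_ASID with VHOST_VDPA_SET_GROUP_ASID
 * (see vhost_vdpa_net_cvq_start() below).
 */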

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them.  Guest's translations
         *   are still validated with virtio virtqueue_pop so there is no
         *   risk for the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time we sent the
     * descriptor.  Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}
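
/*
 * The vhost_vdpa_net_load_*() helpers below all follow the same pattern:
 * pack a control command into the shadow out buffer via
 * vhost_vdpa_net_load_cmd(), send it through the shadow CVQ with
 * vhost_vdpa_net_cvq_add(), and then check the one-byte ack the device
 * wrote into s->status.
 */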

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                VIRTIO_NET_CTRL_MAC,
                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_rx(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa
         * device, which is determined by vhost_vdpa_net_cvq_cmd_page_len(),
         * excessive MAC addresses set by the driver for the filter table can
         * cause truncation of the CVQ command in QEMU. As a result, the vdpa
         * device rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` that it owns.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 * @errp              Error object, set on failure
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            /* ioctl returns -1 here; propagate the negative value as-is */
            return ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

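/*
 * Entry point for "-netdev vhost-vdpa".  A typical invocation (the device
 * node path is an example; use the actual vdpa char device) would be:
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *   -device virtio-net-pci,netdev=vdpa0
 */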
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}