/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

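/*
 * Check that all requested device features can be honored with SVQ enabled:
 * any device-specific feature outside of vdpa_svq_device_features is
 * rejected here, and the transport features are then validated by
 * vhost_svq_valid_features().
 */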
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

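/*
 * Sanity-check that the vhost-vdpa backend really exposes a network device
 * (VIRTIO_ID_NET); any other device class cannot be driven by this client.
 */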
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

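/*
 * Create the vhost-net backend for one range of virtqueues and verify the
 * underlying vdpa device id, tearing the backend down again on failure.
 */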
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

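/*
 * Switch the datapath in or out of SVQ mode at runtime by restarting
 * vhost-net: stopping the device saves the vring state, and the subsequent
 * start picks shadow or passthrough virtqueues depending on the migration
 * state.
 */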
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * vhost_net_start() checks whether migration is in setup or active to
     * decide whether to configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

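/*
 * Migration state notifier: enable SVQ (and with it VHOST_F_LOG_ALL dirty
 * page tracking) when migration enters setup, and drop back to passthrough
 * if the migration fails.
 */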
static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

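/*
 * Per-queue-pair start hook for the data virtqueues: decide whether shadow
 * virtqueues are needed (x-svq set, or a migration in progress) and share
 * the IOVA tree created by the first queue pair with the remaining ones.
 */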
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

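/*
 * Query the backend for the group a virtqueue belongs to. Returns the group
 * number, or a negative errno on failure.
 */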
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

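/*
 * Bind a virtqueue group to an address space id so its mappings can be
 * managed independently of the guest's memory mappings.
 */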
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

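/*
 * Undo vhost_vdpa_cvq_map_buf(): look the buffer up in the IOVA tree, unmap
 * it from the device and drop the translation.
 */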
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

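/*
 * Start hook of the CVQ net client: when the device can isolate CVQ in its
 * own group, move that group to VHOST_VDPA_NET_CVQ_ASID and shadow only the
 * control virtqueue, then map the shadow buffers that control commands are
 * bounced through.
 */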
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   collisions between them are not a concern.  Guest's translations
         *   are still validated with virtio virtqueue_pop so there is no risk
         *   for the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

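/*
 * Enqueue one control command on the shadow CVQ. Returns 0 on success or a
 * negative errno, logging the guest-triggerable case where the queue runs
 * out of descriptors.
 */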
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                    const struct iovec *out_sg, size_t out_num,
                                    const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * The caller should hold the BQL when invoking this function, and should
 * collect the answer before the BQL is released, after which SVQ may poll
 * the queue by itself.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * The caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* the device writes a one-byte ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

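/*
 * Compose one control command at the current cursors and add it to the
 * shadow CVQ, flushing all in-flight commands first if either the queue or
 * the shadow buffers run out of room. On success the cursors are advanced
 * past the command so the next one can be packed right after it.
 */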
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or the control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

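/*
 * Restore the MAC address and, when RX filtering is in use, the MAC filter
 * table of the destination device at live migration.
 */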
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

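/*
 * Load hook of the CVQ net client: replay the device model's control state
 * (MAC, queue pairs, offloads, RX mode and VLAN filters) through the shadow
 * CVQ, then enable the data virtqueues.
 */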
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all of the device's pending used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a used buffer from the vdpa device
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                            &out, &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

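/*
 * Create one vhost-vdpa net client. Datapath clients only wire up their
 * queue pair, while the control client additionally probes CVQ group
 * isolation and allocates the page-aligned shadow buffers used to bounce
 * control commands to the device.
 */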
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       uint64_t features,
                                       Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

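/*
 * Read max_virtqueue_pairs from the device config space when _F_MQ is
 * offered, and report through @has_cvq whether a control virtqueue is
 * present. Without _F_MQ the device has a single queue pair.
 */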
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
1732