xref: /openbmc/qemu/net/vhost-vdpa.c (revision 2f02c14b)
/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Shadow buffers for control commands */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

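/*
 * ASID the CVQ group is switched to when it can be isolated from the data
 * virtqueues, so that CVQ shadow buffer mappings do not mix with the guest
 * memory mappings kept in VHOST_VDPA_GUEST_PA_ASID.
 */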
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

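/*
 * Size of the shadow CVQ buffers, rounded up to the host page size so they
 * can be mmap()ed and DMA-mapped into the device's address space
 * page-aligned.
 */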
static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

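/* Check that the vdpa device backing @net is a network device */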
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    /* Do not inspect device_id if the backend call failed */
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

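/*
 * Stop and restart the vhost device so the dataplane is reconfigured with
 * SVQ enabled or disabled according to @enable; used by the migration state
 * notifier to turn dirty page logging on or off through the shadow
 * virtqueues.
 */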
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start checks migration setup_or_active to decide whether to use SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

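/*
 * The first data virtqueue registers the migration state notifier and, when
 * SVQ is enabled, allocates the IOVA tree that the rest of the virtqueues
 * share.
 */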
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

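/* Return the vring group of @vq_index, or a negative errno on failure */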
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   there is no need to worry about collisions between them.  Guest's
         *   translations are still validated with virtio virtqueue_pop so
         *   there is no risk for the guest to access memory that it
         *   shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

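/*
 * Send a single (out, in) descriptor pair through the shadow CVQ and poll
 * until the device uses it. Returns the number of bytes the device wrote
 * into the in buffer, or a negative errno on failure.
 */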
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq, 1);
}

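/*
 * Compose a CVQ command in the shadow out buffer (ctrl header followed by
 * the command-specific payload) and send it to the device.
 */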
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}

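/*
 * Restore the MAC address and the MAC filter table to the device at startup,
 * e.g. at the destination of a live migration.
 */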
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also has an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                VIRTIO_NET_CTRL_MAC,
                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, different from the device's
     * default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, different from the device's
     * default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           uint16_t vid)
{
    /* The device expects the VLAN id in the CVQ command in little-endian */
    vid = cpu_to_le16(vid);
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
                                                  VIRTIO_NET_CTRL_VLAN_ADD,
                                                  &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (unlikely(*s->status != VIRTIO_NET_OK)) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

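/*
 * NetClientInfo.load callback for the control virtqueue: make the CVQ vring
 * ready, replay the virtio-net device state through CVQ commands when SVQ is
 * enabled, then make the data vrings ready.
 */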
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        r = vhost_vdpa_net_load_mac(s, n);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode and receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, receiving
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses has
 * the same effect on the device model.  The same applies to multicast
 * MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part of the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part of the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a used buffer from the vdpa device for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    /* ioctl() returns -1 and sets errno on failure, so report errno */
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       uint64_t features,
                                       Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

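/*
 * Read max_virtqueue_pairs from the device config space when _F_MQ is
 * offered; otherwise a vhost-vdpa net device has a single queue pair.
 * Also reports through @has_cvq whether the device offers a control vq.
 */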
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            /* ioctl() returns -1 and sets errno; return a negative errno */
            ret = -errno;
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

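/* Entry point of -netdev vhost-vdpa: create one net client per queue pair */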
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
1623