/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

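/*
 * ASID 0 (VHOST_VDPA_GUEST_PA_ASID) holds the guest-visible mappings used by
 * the data virtqueues; the shadow CVQ is moved to the dedicated ASID below
 * when the device can isolate it, so its buffers never collide with guest
 * memory mappings.
 */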
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here as well.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
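
/*
 * For reference, the worst case above corresponds to a
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command laid out roughly as follows
 * (a sketch based on the virtio-net spec):
 *
 *     struct virtio_net_ctrl_hdr hdr;  // class + cmd
 *     struct virtio_net_ctrl_mac uni;  // le32 entries + unicast MACs
 *     struct virtio_net_ctrl_mac mul;  // le32 entries + multicast MACs
 *
 * with at most MAC_TABLE_ENTRIES addresses spread across both tables.
 */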

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
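
/*
 * Reading the mask above: a feature bit is rejected only if the device
 * offers it, it is outside the SVQ allow-list, and it is device-specific.
 * Transport feature bits (the VIRTIO_TRANSPORT_F_START..VIRTIO_TRANSPORT_F_END
 * range) are masked out here and validated separately by
 * vhost_svq_valid_features().
 */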

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case QEMU falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}
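
/*
 * The first queue pair's net client owns the state shared by the whole
 * device: the migration notifier is registered only for index 0, and sibling
 * clients obtained through the helper above borrow its IOVA tree rather than
 * allocating their own.
 */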

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /*
     * We are only called for the first data virtqueue pair, and only if
     * x-svq is not set.
     */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * vhost_net_start() checks migration setup_or_active to decide whether
     * to configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}
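
/*
 * The notifier above switches SVQ on at migration setup, so the shadow
 * virtqueues can expose VHOST_F_LOG_ALL and track dirty pages on behalf of a
 * device that cannot, and switches it back off if the migration fails. No
 * transition is needed on completion, presumably because a completed
 * migration leaves the source VM stopped.
 */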

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    /* An odd total number of vqs means the device has a CVQ */
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}
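
/*
 * A vring group is the kernel's granularity of IOTLB isolation: every
 * virtqueue belongs to a group, and each group can be bound to an address
 * space id (ASID) with the helper below. Data vqs stay in the default ASID,
 * while an isolated CVQ is bound to VHOST_VDPA_NET_CVQ_ASID.
 */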

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}
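
/*
 * Note that DMAMap sizes are inclusive: map_buf stores size - 1 while
 * unmap_buf passes map->size + 1 back to vhost_vdpa_dma_unmap(). The two
 * helpers are strictly paired; every buffer mapped in cvq_start is unmapped
 * in cvq_stop or on cvq_start's own error path.
 */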

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. The
     * migration will be blocked as long as the vhost-vdpa backend does not
     * offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   collisions between them are not a concern.  Guest's translations
         *   are still validated with virtio virtqueue_pop so there is no risk
         *   for the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the moment we sent the
     * descriptor.  Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq, 1);
}
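
/*
 * The helper above is deliberately synchronous: it queues a single out/in
 * descriptor pair on the shadow CVQ and blocks in vhost_svq_poll() until the
 * device marks the buffer as used. On success the return value is the number
 * of bytes the device wrote into the in buffer, i.e. the 1-byte ack.
 */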

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}
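
/*
 * Layout of the shadow buffers used by the helper above (both live in the
 * pages mapped by vhost_vdpa_net_cvq_start()):
 *
 *     cvq_cmd_out_buffer: [ virtio_net_ctrl_hdr | command-specific data ]
 *     status:             [ virtio_net_ctrl_ack ]   <- written by the device
 *
 * Callers check *s->status against VIRTIO_NET_OK after a successful add.
 */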

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                VIRTIO_NET_CTRL_MAC,
                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should stay enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, different from the device's
     * defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, different from the device's
     * defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
                                                  VIRTIO_NET_CTRL_VLAN_ADD,
                                                  &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (unlikely(*s->status != VIRTIO_NET_OK)) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

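    /*
     * n->vlans is a bitmap of MAX_VLAN bits stored as 32-bit words: VLAN id
     * `vid` lives at word `vid >> 5`, bit `vid & 0x1f`; for example, VID 100
     * is bit 4 of n->vlans[3]. Every set bit is replayed below as a
     * VLAN_ADD command.
     */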
    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        r = vhost_vdpa_net_load_mac(s, n);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, receiving
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses has
 * the same effect on the device model. The same applies to multicast
 * MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate the vdpa device returning a used buffer for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by QEMU, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
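
/*
 * The probe above walks the standard virtio initialization handshake just
 * far enough to query vring groups: set ACKNOWLEDGE and DRIVER, negotiate
 * features, set FEATURES_OK, compare the CVQ group against every data vq
 * group, and finally reset the device so the real initialization starts
 * from a clean state.
 */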

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       uint64_t features,
                                       Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

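/*
 * Typical command-line usage (an illustrative example; see the QEMU
 * invocation documentation for the authoritative syntax):
 *
 *     -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *     -device virtio-net-pci,netdev=vdpa0
 *
 * Alternatively, an already-open device fd can be passed with vhostfd=,
 * e.g. from a management daemon.
 */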
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}