/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

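/*
 * ASID used for the shadowed CVQ when it can be isolated from the data vqs,
 * which stay in VHOST_VDPA_GUEST_PA_ASID. The exact value only needs to
 * differ from the data vqs' ASID (assumption: any id the parent device
 * accepts would do; 1 is simply the first one available).
 */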
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

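/*
 * Illustrative layout of the largest control command, which the shadow out
 * buffer is sized for (VIRTIO_NET_CTRL_MAC_TABLE_SET carrying both MAC
 * filter tables, per the virtio-net spec):
 *
 *     struct virtio_net_ctrl_hdr hdr;      class / cmd
 *     struct virtio_net_ctrl_mac uc;       unicast entry count
 *     uint8_t uc_macs[...][ETH_ALEN];      unicast addresses
 *     struct virtio_net_ctrl_mac mc;       multicast entry count
 *     uint8_t mc_macs[...][ETH_ALEN];      multicast addresses
 *
 * vhost_vdpa_net_cvq_cmd_len() below bounds the two tables together with
 * MAC_TABLE_ENTRIES addresses in total.
 */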
static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it always fits.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    /* Check the call result first: device_id is untouched on failure */
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

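/*
 * Restart the net backend so that SVQ, and therefore dirty page logging, is
 * switched on or off for every data vq. Called from the migration state
 * notifier below when x-svq is not set.
 */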
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * vhost_net_start will check migration setup_or_active to decide whether
     * to configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

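/*
 * Per-device setup run on the first data vq: register the migration
 * notifier and, when shadowing, create the IOVA tree that the remaining
 * data vqs will share.
 */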
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

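/*
 * Return the group vring @vq_index belongs to (the unit a vDPA parent lets
 * us assign to an address space), or a negative errno on failure.
 */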
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

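/*
 * Bind virtqueue group @vq_group to address space @asid_num. This is what
 * allows the CVQ shadow buffers to live in a different address space than
 * the guest's memory.
 */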
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares the ASID with the guest or not,
         * because:
         * - The memory listener needs access to the guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   collisions between them are not a concern.  The guest's
         *   translations are still validated with virtio virtqueue_pop, so
         *   there is no risk of the guest accessing memory it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable, but it complicates the
         * code and is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

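/*
 * Send a command to the device through the shadow CVQ and wait for the
 * device to use it. Returns the number of bytes written back into the in
 * buffer (the one-byte ack), or a negative errno on failure.
 */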
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here, since we have held the BQL from the time we sent the
     * descriptor.  Also, we need to take the answer before SVQ processes the
     * used ring by itself once the BQL is released.
     */
    return vhost_svq_poll(svq);
}

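/*
 * Compose a control command (header plus payload) in the shadow out buffer
 * and submit it. For example, loading the MAC address (as done in
 * vhost_vdpa_net_load_mac() below) boils down to:
 *
 *     vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
 *                             VIRTIO_NET_CTRL_MAC_ADDR_SET,
 *                             n->mac, sizeof(n->mac));
 */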
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &offloads, sizeof(offloads));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

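/*
 * NetClientInfo .load callback: replay the device model state that CVQ
 * controls (MAC, MQ, offloads) through the shadow CVQ, so a device started
 * at the migration destination matches what the guest negotiated.
 */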
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        return VIRTIO_NET_ERR;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The control vq pair index
 * @errp: Error pointer
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            /* ioctl returns -1 with errno set, so return a negative errno */
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

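/*
 * Entry point for -netdev vhost-vdpa. A minimal invocation, assuming a vDPA
 * char device at /dev/vhost-vdpa-0 (path is an example), looks like:
 *
 *     -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *     -device virtio-net-pci,netdev=vdpa0
 *
 * One net client is created per data queue pair, plus one control client
 * when the device offers VIRTIO_NET_F_CTRL_VQ.
 */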
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}