/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: need to add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here too.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

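/* Check whether SVQ can work with the offered device feature set */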
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

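/* Check that the vhost-vdpa backend is a network device (VIRTIO_ID_NET) */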
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

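/*
 * Create a vhost_net instance for the given queue pair range and attach
 * it to the net client. Returns 0 on success, -1 on failure.
 */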
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case QEMU falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

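/*
 * Toggle SVQ mode for all data virtqueues by restarting the vhost device.
 * SVQ provides the dirty page logging (_F_LOG) that the actual device may
 * lack, so it is enabled when migration starts and disabled again if the
 * migration fails.
 */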
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* vhost_net_start checks migration setup_or_active to configure SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

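/*
 * Migration state notifier: enable SVQ when entering migration setup and
 * disable it again if the migration fails.
 */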
static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

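/*
 * Return the virtqueue group that the given vq index belongs to, queried
 * with the VHOST_VDPA_GET_VRING_GROUP ioctl, or a negative errno on error.
 */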
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

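/* Bind a virtqueue group to an address space id (ASID) */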
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

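/* Unmap a shadow CVQ buffer previously mapped by vhost_vdpa_cvq_map_buf() */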
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

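/*
 * Start the control virtqueue client. When the device supports it, CVQ is
 * moved to its own ASID and shadowed, so QEMU can intercept control
 * commands while the guest keeps direct access to the data virtqueues.
 */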
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   there is no need to worry about collisions between them.  Guest's
         *   translations are still validated with virtio virtqueue_pop so
         *   there is no risk for the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable, but it complicates the
         * code and is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

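/*
 * Expose a control command to the device through the shadow CVQ and wait
 * for its completion. Returns the length written back by the device (the
 * status answer) or a negative value on error.
 */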
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ polls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}

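/*
 * Compose a control command (header plus command-specific data) in the
 * shadow out buffer and submit it to the device.
 */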
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only during live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &offloads, sizeof(offloads));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

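/*
 * Restore the device control state (MAC, MQ, offloads) through CVQ when
 * the device starts, e.g. on the destination of a live migration.
 */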
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by QEMU, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The control vq index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

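/*
 * Create a single vhost-vdpa net client, either for a data queue pair or
 * for the control virtqueue, and register its vhost_net backend.
 */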
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       uint64_t features,
                                       Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ unless x-svq is enabled,
         * as there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

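/*
 * Read max_virtqueue_pairs from the device config space and report whether
 * the device offers a control virtqueue. Returns the number of queue pairs
 * or a negative value on error.
 */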
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

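/*
 * Entry point for -netdev vhost-vdpa: open the vdpa device (or use the
 * given fd), query its features and queue pair count, and create one net
 * client per data queue pair plus one for CVQ when the device offers it.
 */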
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}