/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};
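
/*
 * This table is consumed through vhost_net_get_feature_bits(); the generic
 * vhost code walks it up to the VHOST_INVALID_FEATURE_BIT terminator when
 * masking negotiated features, so new entries must be added before the
 * terminator.
 */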

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1
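
/*
 * Address space ids: data virtqueues stay in the default ASID
 * (VHOST_VDPA_GUEST_PA_ASID), while an isolated CVQ is moved to
 * VHOST_VDPA_NET_CVQ_ASID so its shadow buffers can be mapped independently
 * of the guest memory map.  See vhost_vdpa_net_cvq_start() below.
 */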

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
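
/*
 * For reference, a VIRTIO_NET_CTRL_MAC_TABLE_SET out buffer is a control
 * header followed by two MAC filter tables (unicast, then multicast), each
 * a 32-bit entry count plus the entries themselves.  A sizing sketch,
 * assuming QEMU's MAC_TABLE_ENTRIES limit of 64 addresses in total:
 *
 *     sizeof(struct virtio_net_ctrl_hdr)     =   2 bytes (class + cmd)
 *     2 * sizeof(struct virtio_net_ctrl_mac) =   8 bytes (2 * 32-bit count)
 *     MAC_TABLE_ENTRIES * ETH_ALEN           = 384 bytes (64 * 6)
 *                                              ---------
 *                                              394 bytes
 */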

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
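
/*
 * Example: a device that offers VIRTIO_NET_F_CTRL_VLAN passes the generic
 * vdpa_feature_bits[] negotiation but fails this check, because that bit is
 * absent from vdpa_svq_device_features: SVQ rejects device features whose
 * state it cannot track and restore across migration.
 */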

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* Only called on the first data vq client, and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * vhost_net_start checks whether migration is in setup or active state
     * to decide whether to configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}
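
/*
 * Lifecycle sketch: when migration enters setup, the notifier above switches
 * the data vqs to shadow mode so SVQ can expose VHOST_F_LOG_ALL and track
 * dirty memory; if migration fails, it switches back to passthrough.  Both
 * transitions go through a full vhost_net_stop()/vhost_net_start() cycle.
 */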

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}
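
/*
 * Typical use, as in vhost_vdpa_net_cvq_start() below: look up the group of
 * the last vq (the CVQ) with vhost_vdpa_get_vring_group() and, if it is not
 * shared with any data vq, move that whole group to VHOST_VDPA_NET_CVQ_ASID:
 *
 *     cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
 *                                            v->dev->vq_index_end - 1, &err);
 *     r = vhost_vdpa_set_address_space_id(v, cvq_group,
 *                                         VHOST_VDPA_NET_CVQ_ASID);
 */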

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}
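
/*
 * Note on the off-by-one-looking arithmetic above: DMAMap.size is an
 * inclusive size (the offset of the last byte), so the map path stores
 * "size - 1" and the unmap path passes "map->size + 1" back to
 * vhost_vdpa_dma_unmap().  Both callers below pass
 * vhost_vdpa_net_cvq_cmd_page_len() as size, which is also why the dma_map
 * call above can use the page length directly.
 */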

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ is not enabled.  Migration will
     * be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them.  Guest's translations
         *   are still validated with virtio virtqueue_pop so there is no
         *   risk for the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time we sent the
     * descriptor.  Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}
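
/*
 * A minimal usage sketch: callers place a command in s->cvq_cmd_out_buffer,
 * then wait synchronously for the device to write the one-byte ack into
 * s->status.  vhost_svq_poll() returns the number of bytes the device wrote,
 * so a successful control command yields sizeof(virtio_net_ctrl_ack) == 1.
 */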

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}
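
/*
 * Resulting out buffer layout, following the virtio-net control vq format:
 *
 *     +--------+--------+------------------------+
 *     | class  |  cmd   | command-specific data  |
 *     | 1 byte | 1 byte |    data_size bytes     |
 *     +--------+--------+------------------------+
 *
 * e.g. vhost_vdpa_net_load_mac() below sends class VIRTIO_NET_CTRL_MAC,
 * cmd VIRTIO_NET_CTRL_MAC_ADDR_SET and the 6-byte MAC as data.
 */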

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility."
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &offloads, sizeof(offloads));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
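
/*
 * Example outcome (hypothetical device): with two data queue pairs, the CVQ
 * index probed is 4; if the backend reports vring groups {0, 0, 0, 0} for
 * the data vqs and group 1 for vq 4, the probe returns 1 (isolated).  If
 * vq 4 shared group 0 with any data vq, it would return 0 and CVQ shadowing
 * in a separate ASID would be skipped.
 */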

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
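
/*
 * Note: vhost-vdpa devices negotiate VIRTIO_F_VERSION_1, so config space
 * fields such as max_virtqueue_pairs are little-endian; lduw_le_p() above
 * converts to host endianness regardless of the host CPU.
 */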

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}