/*
 * vhost shadow virtqueue
 *
 * SPDX-FileCopyrightText: Red Hat, Inc. 2021
 * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "linux-headers/linux/vhost.h"

/**
 * Validate the transport device features that both the guest can use with the
 * SVQ and the SVQ can use with the device.
 *
 * @features: The transport device features
 * @errp: Error pointer
 */
bool vhost_svq_valid_features(uint64_t features, Error **errp)
{
    bool ok = true;
    uint64_t svq_features = features;

    for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END;
         ++b) {
        switch (b) {
        case VIRTIO_F_ANY_LAYOUT:
        case VIRTIO_RING_F_EVENT_IDX:
            continue;

        case VIRTIO_F_ACCESS_PLATFORM:
            /* SVQ trusts the host's IOMMU to translate addresses */
        case VIRTIO_F_VERSION_1:
            /* SVQ trusts that the guest vring is little endian */
            if (!(svq_features & BIT_ULL(b))) {
                svq_features |= BIT_ULL(b);
                ok = false;
            }
            continue;

        default:
            if (svq_features & BIT_ULL(b)) {
                svq_features &= ~BIT_ULL(b);
                ok = false;
            }
        }
    }

    if (!ok) {
        error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64
                         ", ok: 0x%"PRIx64, features, svq_features);
    }
    return ok;
}
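
/*
 * Illustrative example (not taken from the actual callers): a device offering
 * VIRTIO_F_VERSION_1 together with VIRTIO_F_RING_PACKED would be rejected by
 * the check above, since SVQ only implements the split vring layout.  The
 * error message would report an acceptable set with VIRTIO_F_RING_PACKED
 * cleared and VIRTIO_F_ACCESS_PLATFORM added, because SVQ requires both bits
 * handled in the second case of the switch.
 */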

/**
 * Number of descriptors that the SVQ can make available from the guest.
 *
 * @svq: The svq
 */
uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
    return svq->num_free;
}

/**
 * Translate addresses between QEMU's virtual address space and the SVQ IOVA
 * space.
 *
 * @svq: Shadow VirtQueue
 * @addrs: Destination array for the translated IOVA addresses
 * @iovec: Source iovec with QEMU's VA addresses
 * @num: Length of iovec and minimum length of addrs
 */
static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
                                     hwaddr *addrs, const struct iovec *iovec,
                                     size_t num)
{
    if (num == 0) {
        return true;
    }

    for (size_t i = 0; i < num; ++i) {
        DMAMap needle = {
            .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
            .size = iovec[i].iov_len,
        };
        Int128 needle_last, map_last;
        size_t off;

        const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
        /*
         * The map cannot be NULL, since the IOVA tree contains all of the
         * guest space and QEMU already has the address mapped.
         */
        if (unlikely(!map)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid address 0x%"HWADDR_PRIx" given by guest",
                          needle.translated_addr);
            return false;
        }

        off = needle.translated_addr - map->translated_addr;
        addrs[i] = map->iova + off;

        needle_last = int128_add(int128_make64(needle.translated_addr),
                                 int128_makes64(iovec[i].iov_len - 1));
        map_last = int128_make64(map->translated_addr + map->size);
        if (unlikely(int128_gt(needle_last, map_last))) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Guest buffer expands over iova range");
            return false;
        }
    }

    return true;
}
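
/*
 * Worked example of the translation above, with made-up values: given a map
 * { .iova = 0x10000, .translated_addr = 0x7f0000000000, .size = 0xfff } and
 * an iovec entry { .iov_base = (void *)0x7f0000000200, .iov_len = 0x100 },
 * the offset is 0x200 and addrs[i] becomes 0x10200.  The last byte of the
 * buffer, 0x7f00000002ff, does not pass map_last (0x7f0000000fff), so the
 * bounds check succeeds.
 */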

/**
 * Write descriptors to the SVQ vring
 *
 * @svq: The shadow virtqueue
 * @sg: Cache for hwaddr
 * @iovec: The iovec from the guest
 * @num: iovec length
 * @more_descs: True if more descriptors come in the chain
 * @write: True if they are writable descriptors
 *
 * Return true on success, false otherwise and print error.
 */
static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
                                        const struct iovec *iovec, size_t num,
                                        bool more_descs, bool write)
{
    uint16_t i = svq->free_head, last = svq->free_head;
    unsigned n;
    uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
    vring_desc_t *descs = svq->vring.desc;
    bool ok;

    if (num == 0) {
        return true;
    }

    ok = vhost_svq_translate_addr(svq, sg, iovec, num);
    if (unlikely(!ok)) {
        return false;
    }

    for (n = 0; n < num; n++) {
        if (more_descs || (n + 1 < num)) {
            descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
            descs[i].next = cpu_to_le16(svq->desc_next[i]);
        } else {
            descs[i].flags = flags;
        }
        descs[i].addr = cpu_to_le64(sg[n]);
        descs[i].len = cpu_to_le32(iovec[n].iov_len);

        last = i;
        i = svq->desc_next[i];
    }

    svq->free_head = svq->desc_next[last];
    return true;
}
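
/*
 * Illustrative chain layout, not taken from the original sources: for a guest
 * element with two readable and one writable segment, vhost_svq_add_split()
 * below calls this function twice and ends up with something like:
 *
 *   desc[a] = out_sg[0], VRING_DESC_F_NEXT              -> desc[b]
 *   desc[b] = out_sg[1], VRING_DESC_F_NEXT              -> desc[c]
 *   desc[c] = in_sg[0],  VRING_DESC_F_WRITE (chain end)
 *
 * with a, b and c taken from svq->desc_next[] starting at the old free_head.
 */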

static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
                                const struct iovec *out_sg, size_t out_num,
                                const struct iovec *in_sg, size_t in_num,
                                unsigned *head)
{
    unsigned avail_idx;
    vring_avail_t *avail = svq->vring.avail;
    bool ok;
    g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num));

    *head = svq->free_head;

    /* We need some descriptors here */
    if (unlikely(!out_num && !in_num)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Guest provided element with no descriptors");
        return false;
    }

    ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
                                     false);
    if (unlikely(!ok)) {
        return false;
    }

    ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
    if (unlikely(!ok)) {
        return false;
    }

    /*
     * Put the entry in the available array (but do not expose it to the
     * device in avail->idx until after the memory barrier below).
     */
    avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1);
    avail->ring[avail_idx] = cpu_to_le16(*head);
    svq->shadow_avail_idx++;

    /* Update the avail index after writing the descriptor */
    smp_wmb();
    avail->idx = cpu_to_le16(svq->shadow_avail_idx);

    return true;
}

static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
    bool needs_kick;

    /*
     * We need to expose the available array entries before checking the used
     * flags
     */
    smp_mb();

    if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        uint16_t avail_event = le16_to_cpu(
                *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));
        needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx,
                                      svq->shadow_avail_idx - 1);
    } else {
        needs_kick =
            !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY));
    }

    if (!needs_kick) {
        return;
    }

    event_notifier_set(&svq->hdev_kick);
}
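
/*
 * Note on the event idx branch above: vring_need_event(event, new, old)
 * evaluates (uint16_t)(new - event - 1) < (uint16_t)(new - old).  With
 * old == shadow_avail_idx - 1 the window is a single entry, so the device is
 * kicked only if its published avail_event equals the index of the entry
 * that was just made available.
 */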

/**
 * Add an element to an SVQ.
 *
 * Return -EINVAL if the element is invalid, -ENOSPC if the device queue is
 * full.
 */
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
                  size_t out_num, const struct iovec *in_sg, size_t in_num,
                  VirtQueueElement *elem)
{
    unsigned qemu_head;
    unsigned ndescs = in_num + out_num;
    bool ok;

    if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
        return -ENOSPC;
    }

    ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
    if (unlikely(!ok)) {
        return -EINVAL;
    }

    svq->num_free -= ndescs;
    svq->desc_state[qemu_head].elem = elem;
    svq->desc_state[qemu_head].ndescs = ndescs;
    vhost_svq_kick(svq);
    return 0;
}
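
/*
 * Usage sketch (illustrative only): a caller injecting its own buffers, the
 * way the vhost-vdpa net control virtqueue does, would do roughly the
 * following, where cmd_buf and ack_buf are hypothetical buffers already
 * mapped in the SVQ IOVA tree:
 *
 *   struct iovec out = { .iov_base = cmd_buf, .iov_len = cmd_len };
 *   struct iovec in = { .iov_base = ack_buf, .iov_len = ack_len };
 *   int r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
 *   if (unlikely(r != 0)) {
 *       return r;
 *   }
 */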

/* Convenience wrapper to add a guest's element to SVQ */
static int vhost_svq_add_element(VhostShadowVirtqueue *svq,
                                 VirtQueueElement *elem)
{
    return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg,
                         elem->in_num, elem);
}

/**
 * Forward available buffers.
 *
 * @svq: Shadow VirtQueue
 *
 * Note that this function does not guarantee that all of the guest's
 * available buffers are made available to the device in the SVQ avail ring.
 * The guest may have exposed a GPA / GIOVA contiguous buffer, but it may not
 * be contiguous in QEMU's virtual address space.
 *
 * If that happens, the guest's kick notifications will be disabled until the
 * device uses some buffers.
 */
static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
{
    /* Clear event notifier */
    event_notifier_test_and_clear(&svq->svq_kick);

    /* Forward to the device as many available buffers as possible */
    do {
        virtio_queue_set_notification(svq->vq, false);

        while (true) {
            g_autofree VirtQueueElement *elem = NULL;
            int r;

            if (svq->next_guest_avail_elem) {
                elem = g_steal_pointer(&svq->next_guest_avail_elem);
            } else {
                elem = virtqueue_pop(svq->vq, sizeof(*elem));
            }

            if (!elem) {
                break;
            }

            if (svq->ops) {
                r = svq->ops->avail_handler(svq, elem, svq->ops_opaque);
            } else {
                r = vhost_svq_add_element(svq, elem);
            }
            if (unlikely(r != 0)) {
                if (r == -ENOSPC) {
                    /*
                     * This condition is possible since a contiguous buffer in
                     * GPA does not imply a contiguous buffer in qemu's VA
                     * scatter-gather segments. If that happens, the buffer
                     * exposed to the device needs to be a chain of descriptors
                     * at this moment.
                     *
                     * SVQ cannot hold more available buffers if we are here:
                     * queue the current guest descriptor and ignore kicks
                     * until some elements are used.
                     */
                    svq->next_guest_avail_elem = g_steal_pointer(&elem);
                }

                /* VQ is full or broken, just return and ignore kicks */
                return;
            }
            /* elem belongs to SVQ or external caller now */
            elem = NULL;
        }

        virtio_queue_set_notification(svq->vq, true);
    } while (!virtio_queue_empty(svq->vq));
}

/**
 * Handle the guest's kick.
 *
 * @n: guest kick event notifier, the one that the guest set to notify the SVQ.
 */
static void vhost_handle_guest_kick_notifier(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
    event_notifier_test_and_clear(n);
    vhost_handle_guest_kick(svq);
}

static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
{
    uint16_t *used_idx = &svq->vring.used->idx;
    if (svq->last_used_idx != svq->shadow_used_idx) {
        return true;
    }

    svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx);

    return svq->last_used_idx != svq->shadow_used_idx;
}

/**
 * Enable vhost device calls after disabling them.
 *
 * @svq: The svq
 *
 * It returns false if there are pending used buffers from the vhost device,
 * avoiding the possible races between SVQ checking for more work and enabling
 * callbacks. True if the SVQ used vring has no more pending buffers.
 */
static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
{
    if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        uint16_t *used_event =
            (uint16_t *)&svq->vring.avail->ring[svq->vring.num];
        *used_event = cpu_to_le16(svq->shadow_used_idx);
    } else {
        svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
    }

    /* Make sure the event is enabled before the read of used_idx */
    smp_mb();
    return !vhost_svq_more_used(svq);
}

static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
{
    /*
     * No need to disable notification in the event idx case, since used event
     * index is already an index too far away.
     */
    if (!virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
    }
}

static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
                                             uint16_t num, uint16_t i)
{
    for (uint16_t j = 0; j < (num - 1); ++j) {
        i = svq->desc_next[i];
    }

    return i;
}

G_GNUC_WARN_UNUSED_RESULT
static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                           uint32_t *len)
{
    const vring_used_t *used = svq->vring.used;
    vring_used_elem_t used_elem;
    uint16_t last_used, last_used_chain, num;

    if (!vhost_svq_more_used(svq)) {
        return NULL;
    }

    /* Only get used array entries after they have been exposed by dev */
    smp_rmb();
    last_used = svq->last_used_idx & (svq->vring.num - 1);
    used_elem.id = le32_to_cpu(used->ring[last_used].id);
    used_elem.len = le32_to_cpu(used->ring[last_used].len);

    svq->last_used_idx++;
    if (unlikely(used_elem.id >= svq->vring.num)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
                      svq->vdev->name, used_elem.id);
        return NULL;
    }

    if (unlikely(!svq->desc_state[used_elem.id].ndescs)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "Device %s says index %u is used, but it was not available",
            svq->vdev->name, used_elem.id);
        return NULL;
    }

    num = svq->desc_state[used_elem.id].ndescs;
    svq->desc_state[used_elem.id].ndescs = 0;
    last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
    svq->desc_next[last_used_chain] = svq->free_head;
    svq->free_head = used_elem.id;
    svq->num_free += num;

    *len = used_elem.len;
    return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
}

/**
 * Push an element to SVQ, returning it to the guest.
 */
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
                         const VirtQueueElement *elem, uint32_t len)
{
    virtqueue_push(svq->vq, elem, len);
    if (svq->next_guest_avail_elem) {
        /*
         * Avail ring was full when vhost_svq_flush was called, so it's a
         * good moment to make more descriptors available if possible.
         */
        vhost_handle_guest_kick(svq);
    }
}

static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                            bool check_for_avail_queue)
{
    VirtQueue *vq = svq->vq;

    /* Forward as many used buffers as possible. */
    do {
        unsigned i = 0;

        vhost_svq_disable_notification(svq);
        while (true) {
            uint32_t len;
            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
            if (!elem) {
                break;
            }

            if (unlikely(i >= svq->vring.num)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                         "More than %u used buffers obtained in a %u size SVQ",
                         i, svq->vring.num);
                virtqueue_fill(vq, elem, len, i);
                virtqueue_flush(vq, i);
                return;
            }
            virtqueue_fill(vq, elem, len, i++);
        }

        virtqueue_flush(vq, i);
        event_notifier_set(&svq->svq_call);

        if (check_for_avail_queue && svq->next_guest_avail_elem) {
            /*
             * Avail ring was full when vhost_svq_flush was called, so it's a
             * good moment to make more descriptors available if possible.
             */
            vhost_handle_guest_kick(svq);
        }
    } while (!vhost_svq_enable_notification(svq));
}

/**
 * Poll the SVQ to wait for the device to use the specified number
 * of elements and return the total length written by the device.
 *
 * This function races with the main event loop SVQ polling, so extra
 * synchronization is needed.
 *
 * @svq: The svq
 * @num: The number of elements that need to be used
 */
size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
{
    size_t len = 0;

    while (num--) {
        g_autofree VirtQueueElement *elem = NULL;
        int64_t start_us = g_get_monotonic_time();
        uint32_t r = 0;

        do {
            if (vhost_svq_more_used(svq)) {
                break;
            }

            if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
                return len;
            }
        } while (true);

        elem = vhost_svq_get_buf(svq, &r);
        len += r;
    }

    return len;
}
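
/*
 * Sketch of the expected add/poll pairing for a synchronous round trip, e.g.
 * a control command, under the same assumptions as the example after
 * vhost_svq_add() above:
 *
 *   r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
 *   if (unlikely(r != 0)) {
 *       return r;
 *   }
 *   return vhost_svq_poll(svq, 1);
 *
 * The 10e6 microsecond (10 second) bound above limits the busy wait if the
 * device never uses the buffer.
 */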

/**
 * Forward used buffers.
 *
 * @n: hdev call event notifier, the one that the device set to notify the SVQ.
 *
 * Note that we are not making any buffers available in the loop, so there is
 * no way that it runs more than virtqueue size times.
 */
static void vhost_svq_handle_call(EventNotifier *n)
{
    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
                                             hdev_call);
    event_notifier_test_and_clear(n);
    vhost_svq_flush(svq, true);
}

/**
 * Set the call notifier for the SVQ to call the guest
 *
 * @svq: Shadow virtqueue
 * @call_fd: call notifier
 *
 * Called on BQL context.
 */
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
{
    if (call_fd == VHOST_FILE_UNBIND) {
        /*
         * Make event_notifier_set() fail if it is called while handling a
         * device call.
         *
         * SVQ still needs device notifications, since it needs to keep
         * forwarding used buffers even with the unbind.
         */
        memset(&svq->svq_call, 0, sizeof(svq->svq_call));
    } else {
        event_notifier_init_fd(&svq->svq_call, call_fd);
    }
}

/**
 * Get the shadow vq vring address.
 * @svq: Shadow virtqueue
 * @addr: Destination to store address
 */
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                              struct vhost_vring_addr *addr)
{
    addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
    addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
    addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}

size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
{
    size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
    size_t avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
                        sizeof(uint16_t);

    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
}

size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
    size_t used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
                       sizeof(uint16_t);
    return ROUND_UP(used_size, qemu_real_host_page_size());
}
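
/*
 * Worked example (illustrative values): with svq->vring.num == 256 and 4 KiB
 * host pages, the driver area is 256 * 16 = 4096 bytes of descriptors plus
 * 4 + 256 * 2 + 2 = 518 bytes of avail ring (the trailing uint16_t is
 * used_event), rounded up to 8192 bytes.  The device area is
 * 4 + 256 * 8 + 2 = 2054 bytes of used ring (trailing avail_event), rounded
 * up to 4096 bytes.
 */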

/**
 * Set a new file descriptor for the guest to kick the SVQ and notify for avail
 *
 * @svq: The svq
 * @svq_kick_fd: The svq kick fd
 *
 * Note that the SVQ will never close the old file descriptor.
 */
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
{
    EventNotifier *svq_kick = &svq->svq_kick;
    bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
    bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;

    if (poll_stop) {
        event_notifier_set_handler(svq_kick, NULL);
    }

    event_notifier_init_fd(svq_kick, svq_kick_fd);
    /*
     * event_notifier_set_handler() already checks for guest notifications
     * that arrive at the new file descriptor during the switch, so there is
     * no need to check for them explicitly.
     */
    if (poll_start) {
        event_notifier_set(svq_kick);
        event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
    }
}

/**
 * Start the shadow virtqueue operation.
 *
 * @svq: Shadow Virtqueue
 * @vdev: VirtIO device
 * @vq: Virtqueue to shadow
 * @iova_tree: Tree to perform descriptor translations
 */
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq, VhostIOVATree *iova_tree)
{
    size_t desc_size;

    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
    svq->next_guest_avail_elem = NULL;
    svq->shadow_avail_idx = 0;
    svq->shadow_used_idx = 0;
    svq->last_used_idx = 0;
    svq->vdev = vdev;
    svq->vq = vq;
    svq->iova_tree = iova_tree;

    svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
    svq->num_free = svq->vring.num;
    svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
                           PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                           -1, 0);
    desc_size = sizeof(vring_desc_t) * svq->vring.num;
    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
    svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
                           PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                           -1, 0);
    svq->desc_state = g_new0(SVQDescState, svq->vring.num);
    svq->desc_next = g_new0(uint16_t, svq->vring.num);
    for (unsigned i = 0; i < svq->vring.num - 1; i++) {
        svq->desc_next[i] = i + 1;
    }
}
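
/*
 * Typical lifecycle (sketch, assuming a vhost backend such as vhost-vdpa
 * drives the SVQ):
 *
 *   svq = vhost_svq_new(ops, ops_opaque);
 *   vhost_svq_start(svq, vdev, vq, iova_tree);
 *   vhost_svq_set_svq_kick_fd(svq, guest_kick_fd);
 *   vhost_svq_set_svq_call_fd(svq, guest_call_fd);
 *   ...
 *   vhost_svq_stop(svq);
 *   vhost_svq_free(svq);
 *
 * The hdev_kick and hdev_call notifiers are expected to be initialized and
 * wired to the vhost device by the caller.
 */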

/**
 * Stop the shadow virtqueue operation.
 * @svq: Shadow Virtqueue
 */
void vhost_svq_stop(VhostShadowVirtqueue *svq)
{
    vhost_svq_set_svq_kick_fd(svq, VHOST_FILE_UNBIND);
    g_autofree VirtQueueElement *next_avail_elem = NULL;

    if (!svq->vq) {
        return;
    }

    /* Send all pending used descriptors to guest */
    vhost_svq_flush(svq, false);

    for (unsigned i = 0; i < svq->vring.num; ++i) {
        g_autofree VirtQueueElement *elem = NULL;
        elem = g_steal_pointer(&svq->desc_state[i].elem);
        if (elem) {
            /*
             * TODO: This is ok for networking, but other kinds of devices
             * might have problems with just unpopping them.
             */
            virtqueue_unpop(svq->vq, elem, 0);
        }
    }

    next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
    if (next_avail_elem) {
        virtqueue_unpop(svq->vq, next_avail_elem, 0);
    }
    svq->vq = NULL;
    g_free(svq->desc_next);
    g_free(svq->desc_state);
    munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
    munmap(svq->vring.used, vhost_svq_device_area_size(svq));
    event_notifier_set_handler(&svq->hdev_call, NULL);
}

/**
 * Creates a vhost shadow virtqueue, and instructs the vhost device to use the
 * shadow methods and file descriptors.
 *
 * @ops: SVQ owner callbacks
 * @ops_opaque: ops opaque pointer
 */
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
                                    void *ops_opaque)
{
    VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);

    event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
    svq->ops = ops;
    svq->ops_opaque = ops_opaque;
    return svq;
}

/**
 * Free the resources of the shadow virtqueue.
 *
 * @pvq: gpointer to SVQ so it can be used by autofree functions.
 */
void vhost_svq_free(gpointer pvq)
{
    VhostShadowVirtqueue *vq = pvq;
    vhost_svq_stop(vq);
    g_free(vq);
}