xref: /openbmc/qemu/util/async.c (revision 47b74464)
/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll().
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
}
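
/*
 * Illustrative sketch (not part of this file) of the lock-free pattern above:
 * the first thread to set BH_PENDING wins the right to insert the BH, so a BH
 * is never on the list twice even if several threads schedule it at once.
 * Assuming threads A and B call aio_bh_enqueue() on the same bh:
 *
 *     A: fetch_or -> old_flags has BH_PENDING clear -> inserts bh
 *     B: fetch_or -> old_flags has BH_PENDING set   -> skips insertion
 *
 * Both still call aio_notify(), which is cheap and idempotent.
 */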

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_fetch_and is paired with aio_bh_enqueue().  The implicit
     * memory barrier ensures that the callback sees all writes done by the
     * scheduling thread.  It also ensures that the scheduling thread sees the
     * cleared flag before bh->cb has run, and thus will call aio_notify again
     * if necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}
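
/*
 * Usage sketch (illustrative only, assuming a valid AioContext *ctx): the
 * aio_bh_new() and aio_bh_schedule_oneshot() convenience wrappers in
 * "block/aio.h" supply the name argument automatically.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, NULL);
 *     qemu_bh_schedule(bh);        // run my_cb on the next aio_bh_poll()
 *     ...
 *     qemu_bh_delete(bh);          // when the BH is no longer needed
 *
 * A one-shot BH needs no explicit delete:
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, NULL);
 */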

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Concurrent calls to aio_bh_poll() are not allowed. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}
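
/*
 * Why the slice list (illustrative note): aio_bh_poll() can run again
 * re-entrantly from within a BH callback, e.g. when the callback enters a
 * nested event loop.  Each invocation snapshots the pending BHs into its own
 * on-stack slice and appends it to ctx->bh_slice_list, so the outer loop can
 * resume draining exactly where it left off once the nested call returns:
 *
 *     outer aio_bh_poll()            -> slice A queued, starts draining
 *       BH callback runs aio_poll()  -> inner aio_bh_poll(), slice B queued
 *       inner call drains A, then B  -> bh_slice_list empty, inner returns
 *     outer loop sees empty list     -> done
 */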

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous. */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous: the bottom half is actually deleted (freed)
 * the next time aio_bh_poll() dequeues it.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
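
/*
 * Worked example (illustrative): with one idle BH pending and the nearest
 * timer 3ms away, aio_compute_bh_timeout() returns 10000000 (10ms in ns),
 * timerlistgroup_deadline_ns() returns 3000000, and qemu_soonest_timeout()
 * picks 3000000.  A scheduled non-idle BH short-circuits everything to 0,
 * and -1 (block forever) survives only when no BH or timer needs service.
 */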

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}
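
/*
 * Usage sketch (illustrative, assuming a GLib main loop): the returned
 * GSource lets a GMainContext drive this AioContext through the
 * prepare/check/dispatch callbacks above.
 *
 *     GSource *source = aio_get_g_source(ctx);
 *     g_source_attach(source, g_main_context_default());
 *     g_source_unref(source);   // the GMainContext now holds a reference
 */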

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
     * aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}
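
/*
 * Illustrative interleaving (not part of this file) showing how the smp_mb()
 * pairing with aio_ctx_prepare() avoids a lost wakeup:
 *
 *     event loop thread              notifier thread
 *     -----------------              ---------------
 *     notify_me |= 1
 *     smp_mb()
 *     compute timeout                bh->flags |= BH_SCHEDULED (enqueue)
 *                                    smp_wmb(); notified = true
 *                                    smp_mb(); reads notify_me == 1
 *                                    event_notifier_set() -> poll wakes up
 *
 * If the notifier thread instead reads notify_me == 0, the barriers guarantee
 * that the event loop's timeout computation already saw the scheduled BH.
 */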

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    /*
     * Scheduled coroutines were pushed onto a LIFO list; reverse it first so
     * they are entered in the order they were scheduled.
     */
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
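
/*
 * Usage sketch (illustrative, error handling elided): a standalone event loop
 * thread built on this API, along the lines of QEMU's IOThread.
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     qemu_set_current_aio_context(ctx);
 *     while (running) {
 *         aio_poll(ctx, true);   // block until timers/BHs/fds need service
 *     }
 *     aio_context_unref(ctx);
 */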

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}
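
/*
 * Usage sketch (illustrative): starting a coroutine in another thread's
 * AioContext.  aio_co_schedule() is safe to call from any thread; the
 * coroutine is entered later by co_schedule_bh_cb() in ctx's home thread.
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         ...
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, NULL);
 *     aio_co_schedule(iothread_ctx, co);   // iothread_ctx: hypothetical name
 */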

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}