/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

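/*
 * A minimal usage sketch of the BH lifecycle (my_cb, MyState and s are
 * hypothetical names, not part of this file):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;    // runs in the AioContext's thread
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s); // allocated, not yet pending
 *     qemu_bh_schedule(bh);   // BH_PENDING | BH_SCHEDULED, wakes the loop
 *     ...
 *     qemu_bh_delete(bh);     // BH_DELETED; freed later by aio_bh_poll()
 */
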
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with qatomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when a new BH is scheduled.
     * This is needed to avoid guest timeouts caused by excessively long
     * execution cycles.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                              ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

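/*
 * A sketch of the common fire-and-forget pattern, via the
 * aio_bh_schedule_oneshot() wrapper that supplies the name argument
 * (my_completion_cb is a hypothetical callback):
 *
 *     aio_bh_schedule_oneshot(ctx, my_completion_cb, opaque);
 *
 * The BH is created with BH_SCHEDULED | BH_ONESHOT already set, so
 * aio_bh_poll() both invokes the callback and frees the QEMUBH.
 */
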
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/*
 * aio_bh_poll() must not be called concurrently with itself on the same
 * AioContext, although nested calls from within a BH callback are safe
 * (see the walkthrough after this function).
 */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue().  */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

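/*
 * Why slices, in outline: a BH callback may re-enter the event loop (for
 * example through aio_poll()) and hence call aio_bh_poll() recursively.
 * Each invocation steals the then-current ctx->bh_list into its own
 * on-stack slice at the tail of bh_slice_list, while draining always
 * starts from the head:
 *
 *     aio_bh_poll()                  // appends slice A, drains from head
 *       -> a callback re-enters aio_bh_poll()
 *            appends slice B (BHs queued meanwhile), then finishes
 *            draining A before B, preserving FIFO order
 *       <- inner call returns with bh_slice_list empty
 *     outer loop sees the empty list and returns
 */
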
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous. */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is asynchronous: the bottom half is actually deleted
 * (freed) the next time aio_bh_poll() dequeues it.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

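/*
 * Worked example: with only an idle BH scheduled, the BH scan above yields
 * a 10ms timeout; if the nearest timer deadline is 2ms away,
 * qemu_soonest_timeout() picks the timer and the caller blocks for at most
 * 2ms.  A scheduled non-idle BH short-circuits everything to 0.
 */
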
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

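/*
 * In outline, the wakeup handshake pairs up like this (aio_notify() on the
 * left, the event loop on the right):
 *
 *     aio_notify():                   aio_ctx_prepare():
 *       write bh->flags etc.            notify_me |= 1
 *       smp_wmb()                       smp_mb()
 *       notified = true                 compute timeout (reads BH flags)
 *       smp_mb()                      aio_ctx_check():
 *       if (notify_me)                  notify_me &= ~1 (store-release)
 *           event_notifier_set()        aio_notify_accept()
 *
 * Either the producer sees notify_me set and kicks the event notifier, or
 * the event loop sees the scheduled BH while computing its timeout; missing
 * both would require a reordering that the barriers rule out.
 */
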
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing; we only wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

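    /*
     * QSLIST_INSERT_HEAD_ATOMIC in aio_co_schedule() builds the list in
     * LIFO order; reverse it here so coroutines run in scheduling order.
     */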
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

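/*
 * A minimal sketch of standalone use (error handling elided; aio_poll()
 * is defined in aio-posix.c / aio-win32.c):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     aio_poll(ctx, false);     // one non-blocking dispatch pass
 *     aio_context_unref(ctx);
 */
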
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                           __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

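/*
 * A minimal sketch (my_co_fn is a hypothetical entry point): hand a
 * coroutine to another AioContext's thread.
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, opaque);
 *     aio_co_schedule(target_ctx, co);  // entered from target_ctx's thread
 *
 * Scheduling the same coroutine again before it runs aborts above, since
 * co->scheduled still records the first caller.
 */
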
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

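/*
 * Typical use, in sketch form: a coroutine that must continue in the main
 * loop's context.
 *
 *     aio_co_reschedule_self(qemu_get_aio_context());
 *     // from here on the coroutine runs in the main AioContext
 */
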
void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

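/*
 * In outline, an IOThread binds its AioContext once at startup and then
 * loops (compare iothread.c; stopping is that loop's exit condition):
 *
 *     qemu_set_current_aio_context(iothread->ctx);
 *     while (!stopping) {
 *         aio_poll(iothread->ctx, true);
 *     }
 */
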
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
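
/*
 * Sketch: cap this context's thread pool at 16 workers with no reserved
 * minimum (error handling elided):
 *
 *     aio_context_set_thread_pool_params(ctx, 0, 16, &error_abort);
 */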