/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

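/*
 * Illustrative sketch (hypothetical caller; the public API is declared in
 * block/aio.h): the typical bottom half lifecycle that drives the flags
 * above.
 *
 *     void my_cb(void *opaque) { ... }         // runs in ctx's thread
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, opaque);
 *     qemu_bh_schedule(bh);    // BH_SCHEDULED: my_cb runs on the next
 *                              // aio_bh_poll() pass
 *     qemu_bh_delete(bh);      // BH_DELETED: freed later by the event loop
 *
 *     // One-shot convenience, no handle to manage (BH_SCHEDULED|BH_ONESHOT):
 *     aio_bh_schedule_oneshot(ctx, my_cb, opaque);
 */
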
struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
    MemReentrancyGuard *reentrancy_guard;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when new BH is set.
     * This is needed to avoid guest timeouts caused
     * by the long cycles of the execution.
     */
    icount_notify_exit();
}

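/*
 * Illustrative note: because aio_bh_enqueue() is lock-free and aio_notify()
 * kicks the event loop, qemu_bh_schedule() may be called from any thread.
 * A hypothetical worker thread can hand results back to the event loop:
 *
 *     // worker thread
 *     req->result = do_blocking_work(req);    // hypothetical helper
 *     qemu_bh_schedule(req->completion_bh);   // wakes ctx's thread safely
 */
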
/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
        .reentrancy_guard = reentrancy_guard,
    };
    return bh;
}

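/*
 * Illustrative sketch (hedged: the aio_bh_new_guarded() wrapper lives in the
 * headers, not in this file): device code passes its MemReentrancyGuard so
 * that aio_bh_call() below can detect a BH callback re-entering a device
 * that is already inside an I/O handler.
 *
 *     dev->bh = aio_bh_new_guarded(ctx, my_dev_bh_cb, dev,
 *                                  &DEVICE(dev)->mem_reentrancy_guard);
 */
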
void aio_bh_call(QEMUBH *bh)
{
    bool last_engaged_in_io = false;

    if (bh->reentrancy_guard) {
        last_engaged_in_io = bh->reentrancy_guard->engaged_in_io;
        if (bh->reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        bh->reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (bh->reentrancy_guard) {
        bh->reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
}

/* aio_bh_poll() must not be called concurrently on the same AioContext. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: it only clears BH_SCHEDULED and does not
 * wait for a callback that is already running.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous.  The actual deletion and freeing happen
 * later, in aio_bh_poll().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

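/*
 * Illustrative sketch (hypothetical caller): a cancelled bottom half stays
 * valid and can be rescheduled, while qemu_bh_delete() is final:
 *
 *     qemu_bh_schedule(bh);
 *     qemu_bh_cancel(bh);     // callback will not run this time
 *     qemu_bh_schedule(bh);   // ...but the same BH can be reused
 *     qemu_bh_delete(bh);     // last use; bh must not be touched afterwards
 */
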
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

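/*
 * Worked example (hypothetical state): with only an idle BH pending,
 * aio_compute_bh_timeout() yields 10000000 ns (10 ms); if a QEMU timer also
 * expires in 5 ms, qemu_soonest_timeout(10000000, 5000000) picks 5000000 ns,
 * so the event loop wakes for the timer first.  Any non-idle scheduled BH
 * short-circuits everything to 0.
 */
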
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

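/*
 * Illustrative sketch (not from this file): an AioContext can be driven by a
 * GLib main loop through the GSource above; "loop" is a hypothetical
 * GMainLoop.
 *
 *     GSource *src = aio_get_g_source(ctx);   // takes a reference
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);                    // the main context now owns it
 *     ...
 *     g_main_loop_run(loop);       // runs prepare/check/dispatch from above
 */
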
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

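/*
 * Illustrative interleaving (hypothetical schedule, summarizing the barrier
 * comments above) of a producer in aio_bh_enqueue()/aio_notify() against the
 * event loop in aio_ctx_prepare():
 *
 *     producer                        event loop
 *     --------                        ----------
 *     insert bh into ctx->bh_list     notify_me |= 1
 *     smp_mb()                        smp_mb()
 *     read notify_me                  compute timeout from ctx->bh_list
 *
 * Whatever the order, at least one side sees the other's write: either the
 * timeout computation observes the new BH, or the producer observes
 * notify_me != 0 and calls event_notifier_set(), so no wakeup is lost.
 */
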
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

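/*
 * Scheduling-order note (illustrative, summarizing co_schedule_bh_cb above):
 * QSLIST_INSERT_HEAD_ATOMIC in aio_co_schedule() builds a LIFO stack, so if
 * coroutines are scheduled in the order A, B, C the atomic list holds C, B, A.
 * The first loop above reverses it into "straight" so the second loop enters
 * them as A, B, C, i.e. in scheduling order.
 */
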
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

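/*
 * Illustrative sketch (hypothetical thread function, modeled on QEMU's
 * iothread): a dedicated event loop thread creates an AioContext and spins
 * on aio_poll().  "running" is a hypothetical flag.
 *
 *     static void *event_loop_thread(void *opaque)
 *     {
 *         AioContext *ctx = aio_context_new(&error_abort);
 *
 *         qemu_set_current_aio_context(ctx);   // defined below in this file
 *         while (qatomic_read(&running)) {
 *             aio_poll(ctx, true);             // blocks until work arrives
 *         }
 *         aio_context_unref(ctx);
 *         return NULL;
 *     }
 */
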
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

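/*
 * Illustrative sketch (hypothetical coroutine): hopping into another
 * AioContext and back.  "iothread_ctx" is assumed to be some other
 * context's pointer.
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         AioContext *home = qemu_get_current_aio_context();
 *
 *         aio_co_reschedule_self(iothread_ctx);  // resume in iothread_ctx
 *         ...                                    // work in the other thread
 *         aio_co_reschedule_self(home);          // hop back
 *     }
 */
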
void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

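/*
 * Illustrative sketch (hypothetical request type and helpers): the canonical
 * aio_co_wake() pattern.  A coroutine parks itself with
 * qemu_coroutine_yield() and a completion callback, possibly running in
 * another thread, resumes it.
 *
 *     static void my_request_complete(void *opaque)    // hypothetical cb
 *     {
 *         MyRequest *req = opaque;
 *
 *         req->done = true;
 *         aio_co_wake(req->co);              // safe from any thread
 *     }
 *
 *     static void coroutine_fn my_request_co(MyRequest *req)
 *     {
 *         req->co = qemu_coroutine_self();
 *         start_io(req, my_request_complete);    // hypothetical helper
 *         while (!req->done) {
 *             qemu_coroutine_yield();        // resumed by aio_co_wake()
 *         }
 *     }
 */
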
QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
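
/*
 * Illustrative call (hypothetical values): pin the context's thread pool to
 * between 2 and 16 worker threads, aborting on invalid input.
 *
 *     aio_context_set_thread_pool_params(ctx, 2, 16, &error_abort);
 */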