/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    /* A oneshot BH is born both scheduled and deleted: aio_bh_poll() will
     * run it exactly once and then reclaim it.
     */
    bh->scheduled = 1;
    bh->deleted = 1;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}
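/* Illustrative usage of the explicit-lifetime API below (the names my_cb
 * and my_opaque are hypothetical caller code, not part of this file):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_opaque);
 *     qemu_bh_schedule(bh);
 *     ...
 *     qemu_bh_delete(bh);   // deletion is deferred, see qemu_bh_delete()
 *
 * Unlike aio_bh_schedule_oneshot(), the caller owns the returned BH and
 * must eventually delete it.
 */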
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
 * Returns 1 if at least one non-idle bottom half ran, 0 otherwise.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;
    bool deleted = false;

    qemu_lockcnt_inc(&ctx->list_lock);

    ret = 0;
    for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
        next = atomic_rcu_read(&bh->next);
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs don't count as progress */
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
        if (bh->deleted) {
            deleted = true;
        }
    }

    /* remove deleted bhs */
    if (!deleted) {
        qemu_lockcnt_dec(&ctx->list_lock);
        return ret;
    }

    if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_lockcnt_unlock(&ctx->list_lock);
    }
    return ret;
}
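/* Schedule a BH but mark it "idle": it does not count as progress for
 * aio_bh_poll(), and aio_compute_timeout() only guarantees that it runs
 * within about 10 ms rather than immediately.
 */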
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}

/* This function is async: it only clears the scheduled flag and does not
 * wait for a callback that is already running to finish.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is async.  The bottom half is only marked as deleted here;
 * aio_bh_poll() actually unlinks and frees it at the very end.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
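/* Compute how long the event loop may sleep, in nanoseconds: 0 if a
 * non-idle BH or an expired timer is pending, -1 if it may block forever,
 * otherwise the time until the nearest timer or idle-BH deadline.
 */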
int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = atomic_rcu_read(&ctx->first_bh); bh;
         bh = atomic_rcu_read(&bh->next)) {
        if (bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
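/* GSourceFuncs glue: the four callbacks below let an AioContext be
 * embedded as a GSource in a GLib main loop (see aio_get_g_source()).
 */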
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx, true);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

    qemu_lockcnt_lock(&ctx->list_lock);
    assert(!qemu_lockcnt_count(&ctx->list_lock));
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_lockcnt_unlock(&ctx->list_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init();
        laio_attach_aio_context(ctx->linux_aio, ctx);
    }
    return ctx->linux_aio;
}
#endif
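/* Kick the event loop out of (or keep it from entering) a blocking wait,
 * so that it re-evaluates bottom halves and handlers.  The wakeup is
 * recorded in ctx->notified and consumed by aio_notify_accept().
 */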
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool event_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return atomic_read(&ctx->notified);
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb,
                           event_notifier_poll);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}
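/* aio_context_acquire()/aio_context_release() take ctx->lock, a recursive
 * mutex, so the same thread may acquire the context multiple times.
 */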
void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}