/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
    MemReentrancyGuard *reentrancy_guard;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when a new BH is scheduled.
     * This is needed to avoid guest timeouts caused by excessively long
     * execution cycles.
     */
    icount_notify_exit();
}
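/*
 * Illustrative note (a summary of this file, not additional API): the public
 * entry points defined below map onto aio_bh_enqueue() flag combinations
 * roughly as follows:
 *
 *     aio_bh_schedule_oneshot_full()  -> BH_SCHEDULED | BH_ONESHOT
 *     qemu_bh_schedule()              -> BH_SCHEDULED
 *     qemu_bh_schedule_idle()         -> BH_SCHEDULED | BH_IDLE
 *     qemu_bh_delete()                -> BH_DELETED
 *
 * BH_PENDING is ORed in by aio_bh_enqueue() itself and only tracks whether
 * the BH is currently on ctx->bh_list.
 */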
/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
        .reentrancy_guard = reentrancy_guard,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bool last_engaged_in_io = false;

    if (bh->reentrancy_guard) {
        last_engaged_in_io = bh->reentrancy_guard->engaged_in_io;
        if (bh->reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        bh->reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (bh->reentrancy_guard) {
        bh->reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
}

/* Multiple invocations of aio_bh_poll() must not run concurrently. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/*
 * This function is asynchronous: the callback may still run if the bottom
 * half has already been dequeued by aio_bh_poll().
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is asynchronous: the bottom half is only freed later, by
 * aio_bh_poll() (or by aio_ctx_finalize()).
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}
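/*
 * Illustrative sketch (not part of this file): the typical lifecycle of a
 * bottom half as driven by a device or block driver.  The callback and the
 * MyState type are hypothetical.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ...
 *     }
 *
 *     s->bh = aio_bh_new(ctx, my_cb, s);   // allocated, not yet scheduled
 *     qemu_bh_schedule(s->bh);             // runs once, from ctx's event loop
 *     ...
 *     qemu_bh_delete(s->bh);               // before the AioContext is finalized
 *     s->bh = NULL;
 *
 * For fire-and-forget callbacks, aio_bh_schedule_oneshot(ctx, my_cb, s)
 * combines allocation, scheduling and deletion in one call.
 */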
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}
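/*
 * Illustrative note on aio_compute_timeout() above (a summary, no additional
 * behaviour): the result is in nanoseconds and means
 *
 *     0   -> there is ready work (a scheduled non-idle BH or an expired
 *            timer), so the caller must not block;
 *     -1  -> nothing is pending and no timer is armed, block indefinitely;
 *     n   -> block for at most n ns, e.g. 10000000 (10 ms) when only idle
 *            BHs are scheduled, or the nearest timer deadline if sooner.
 *
 * aio_ctx_prepare() then converts this to the millisecond granularity that
 * GLib expects via qemu_timeout_ns_to_ms().
 */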
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext.
         * In many cases memory leaks, hangs, or inconsistent state occur when
         * a BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}
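/*
 * Illustrative sketch (not part of this file): the prepare/check/dispatch
 * callbacks above are only invoked once the AioContext's GSource has been
 * attached to a GLib main context, along the lines of:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 *
 * Whether the default GMainContext or a per-thread one is used depends on the
 * caller; this is just the generic GLib attachment pattern.
 */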
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading
     * ctx->notify_me.  Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}
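/*
 * Illustrative note (a summary of the code above, no additional behaviour):
 * the wakeup handshake between producers and the event loop is roughly
 *
 *     producer (aio_bh_enqueue/aio_notify)   consumer (aio_ctx_prepare/check)
 *     ------------------------------------   --------------------------------
 *     publish work (bh_list insert)          notify_me |= 1
 *     smp_wmb(); notified = true             smp_mb()
 *     smp_mb()                               compute timeout, then poll
 *     if (notify_me)                         notify_me &= ~1
 *         event_notifier_set()               aio_notify_accept()
 *
 * The barriers ensure that either the consumer sees the new work when it
 * computes its timeout, or the producer sees notify_me set and writes the
 * event notifier, so a wakeup is never lost.
 */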
static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /*
     * The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}
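/*
 * Illustrative sketch (not part of this file): scheduling coroutine work onto
 * a specific AioContext from an arbitrary thread.  The coroutine entry point
 * is hypothetical.
 *
 *     static void coroutine_fn my_co_entry(void *opaque)
 *     {
 *         ...
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_entry, opaque);
 *     aio_co_schedule(ctx, co);    // safe from any thread; runs in ctx
 *
 * A coroutine that has yielded while waiting for I/O is typically resumed
 * with aio_co_wake(co) (defined below), which enters it directly when called
 * from the coroutine's own context and falls back to aio_co_schedule()
 * otherwise.
 */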
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /*
     * Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
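/*
 * Illustrative sketch (not part of this file): adjusting the thread pool
 * bounds of a context, using the usual Error propagation pattern.  The bounds
 * shown are arbitrary example values.
 *
 *     Error *local_err = NULL;
 *
 *     aio_context_set_thread_pool_params(ctx, 2, 8, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 *
 * As checked above, max must be non-zero and at least min; the new bounds are
 * applied to an existing thread pool via thread_pool_update_params().
 */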