/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when a new BH is scheduled.
     * This is needed to avoid guest timeouts caused
     * by the long cycles of the execution.
     */
    icount_notify_exit();
}
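
/*
 * Example (illustrative sketch; my_handler and MyJob are hypothetical, not
 * part of this file): a thread that does not run the event loop can hand
 * work to an AioContext with a one-shot bottom half.  aio_bh_enqueue()
 * publishes the BH, aio_notify() kicks the loop, and aio_bh_poll() later
 * invokes the callback and frees the BH:
 *
 *     static void my_handler(void *opaque)
 *     {
 *         MyJob *job = opaque;
 *         ...   // runs in the thread that polls ctx
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_handler, job);
 */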

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* aio_bh_poll() must not be called concurrently with itself */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/*
 * This function is async: it only unschedules the bottom half and does not
 * wait for an already-running callback to finish.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is async: the bottom half is freed the next time
 * aio_bh_poll() dequeues it, without invoking the callback.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}
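
/*
 * Example (illustrative sketch; timer_tick and state are hypothetical): a
 * recurring bottom half is created once, re-armed as needed, and must be
 * deleted by its owner before the AioContext is finalized:
 *
 *     QEMUBH *bh = aio_bh_new(ctx, timer_tick, state);
 *     qemu_bh_schedule(bh);   // run timer_tick() on the next aio_bh_poll()
 *     qemu_bh_cancel(bh);     // or drop a pending run
 *     qemu_bh_delete(bh);     // free it once no longer needed
 */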

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
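
/*
 * Example (illustrative sketch; should_stop is hypothetical): a dedicated
 * thread can drive an AioContext directly instead of through GLib.
 * aio_poll() uses aio_compute_timeout() to decide how long to block, so a
 * pending BH or an expiring timer (deadlines are in nanoseconds) wakes it
 * up on time:
 *
 *     while (!should_stop) {
 *         aio_poll(ctx, true);   // dispatches BHs, timers and fd handlers
 *     }
 */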

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}
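
/*
 * Example (illustrative sketch): the prepare/check/dispatch callbacks above
 * (plus finalize below) let a GLib main loop drive this AioContext.  A
 * caller attaches the GSource once and then iterates its GMainContext as
 * usual:
 *
 *     GSource *src = aio_get_g_source(ctx);   // takes a reference
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 */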
384 */ 385 if (unlikely(!(flags & BH_DELETED))) { 386 fprintf(stderr, "%s: BH '%s' leaked, aborting...\n", 387 __func__, bh->name); 388 abort(); 389 } 390 391 g_free(bh); 392 } 393 394 aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL); 395 event_notifier_cleanup(&ctx->notifier); 396 qemu_rec_mutex_destroy(&ctx->lock); 397 qemu_lockcnt_destroy(&ctx->list_lock); 398 timerlistgroup_deinit(&ctx->tlg); 399 unregister_aiocontext(ctx); 400 aio_context_destroy(ctx); 401 } 402 403 static GSourceFuncs aio_source_funcs = { 404 aio_ctx_prepare, 405 aio_ctx_check, 406 aio_ctx_dispatch, 407 aio_ctx_finalize 408 }; 409 410 GSource *aio_get_g_source(AioContext *ctx) 411 { 412 aio_context_use_g_source(ctx); 413 g_source_ref(&ctx->source); 414 return &ctx->source; 415 } 416 417 ThreadPool *aio_get_thread_pool(AioContext *ctx) 418 { 419 if (!ctx->thread_pool) { 420 ctx->thread_pool = thread_pool_new(ctx); 421 } 422 return ctx->thread_pool; 423 } 424 425 #ifdef CONFIG_LINUX_AIO 426 LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp) 427 { 428 if (!ctx->linux_aio) { 429 ctx->linux_aio = laio_init(errp); 430 if (ctx->linux_aio) { 431 laio_attach_aio_context(ctx->linux_aio, ctx); 432 } 433 } 434 return ctx->linux_aio; 435 } 436 437 LinuxAioState *aio_get_linux_aio(AioContext *ctx) 438 { 439 assert(ctx->linux_aio); 440 return ctx->linux_aio; 441 } 442 #endif 443 444 #ifdef CONFIG_LINUX_IO_URING 445 LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp) 446 { 447 if (ctx->linux_io_uring) { 448 return ctx->linux_io_uring; 449 } 450 451 ctx->linux_io_uring = luring_init(errp); 452 if (!ctx->linux_io_uring) { 453 return NULL; 454 } 455 456 luring_attach_aio_context(ctx->linux_io_uring, ctx); 457 return ctx->linux_io_uring; 458 } 459 460 LuringState *aio_get_linux_io_uring(AioContext *ctx) 461 { 462 assert(ctx->linux_io_uring); 463 return ctx->linux_io_uring; 464 } 465 #endif 466 467 void aio_notify(AioContext *ctx) 468 { 469 /* 470 * Write e.g. ctx->bh_list before writing ctx->notified. Pairs with 471 * smp_mb() in aio_notify_accept(). 472 */ 473 smp_wmb(); 474 qatomic_set(&ctx->notified, true); 475 476 /* 477 * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me. 478 * Pairs with smp_mb() in aio_ctx_prepare or aio_poll. 479 */ 480 smp_mb(); 481 if (qatomic_read(&ctx->notify_me)) { 482 event_notifier_set(&ctx->notifier); 483 } 484 } 485 486 void aio_notify_accept(AioContext *ctx) 487 { 488 qatomic_set(&ctx->notified, false); 489 490 /* 491 * Order reads of ctx->notified (in aio_context_notifier_poll()) and the 492 * above clearing of ctx->notified before reads of e.g. bh->flags. Pairs 493 * with smp_wmb() in aio_notify. 494 */ 495 smp_mb(); 496 } 497 498 static void aio_timerlist_notify(void *opaque, QEMUClockType type) 499 { 500 aio_notify(opaque); 501 } 502 503 static void aio_context_notifier_cb(EventNotifier *e) 504 { 505 AioContext *ctx = container_of(e, AioContext, notifier); 506 507 event_notifier_test_and_clear(&ctx->notifier); 508 } 509 510 /* Returns true if aio_notify() was called (e.g. a BH was scheduled) */ 511 static bool aio_context_notifier_poll(void *opaque) 512 { 513 EventNotifier *e = opaque; 514 AioContext *ctx = container_of(e, AioContext, notifier); 515 516 /* 517 * No need for load-acquire because we just want to kick the 518 * event loop. aio_notify_accept() takes care of synchronizing 519 * the event loop with the producers. 
520 */ 521 return qatomic_read(&ctx->notified); 522 } 523 524 static void aio_context_notifier_poll_ready(EventNotifier *e) 525 { 526 /* Do nothing, we just wanted to kick the event loop */ 527 } 528 529 static void co_schedule_bh_cb(void *opaque) 530 { 531 AioContext *ctx = opaque; 532 QSLIST_HEAD(, Coroutine) straight, reversed; 533 534 QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines); 535 QSLIST_INIT(&straight); 536 537 while (!QSLIST_EMPTY(&reversed)) { 538 Coroutine *co = QSLIST_FIRST(&reversed); 539 QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next); 540 QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next); 541 } 542 543 while (!QSLIST_EMPTY(&straight)) { 544 Coroutine *co = QSLIST_FIRST(&straight); 545 QSLIST_REMOVE_HEAD(&straight, co_scheduled_next); 546 trace_aio_co_schedule_bh_cb(ctx, co); 547 aio_context_acquire(ctx); 548 549 /* Protected by write barrier in qemu_aio_coroutine_enter */ 550 qatomic_set(&co->scheduled, NULL); 551 qemu_aio_coroutine_enter(ctx, co); 552 aio_context_release(ctx); 553 } 554 } 555 556 AioContext *aio_context_new(Error **errp) 557 { 558 int ret; 559 AioContext *ctx; 560 561 ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); 562 QSLIST_INIT(&ctx->bh_list); 563 QSIMPLEQ_INIT(&ctx->bh_slice_list); 564 aio_context_setup(ctx); 565 566 ret = event_notifier_init(&ctx->notifier, false); 567 if (ret < 0) { 568 error_setg_errno(errp, -ret, "Failed to initialize event notifier"); 569 goto fail; 570 } 571 g_source_set_can_recurse(&ctx->source, true); 572 qemu_lockcnt_init(&ctx->list_lock); 573 574 ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx); 575 QSLIST_INIT(&ctx->scheduled_coroutines); 576 577 aio_set_event_notifier(ctx, &ctx->notifier, 578 false, 579 aio_context_notifier_cb, 580 aio_context_notifier_poll, 581 aio_context_notifier_poll_ready); 582 #ifdef CONFIG_LINUX_AIO 583 ctx->linux_aio = NULL; 584 #endif 585 586 #ifdef CONFIG_LINUX_IO_URING 587 ctx->linux_io_uring = NULL; 588 #endif 589 590 ctx->thread_pool = NULL; 591 qemu_rec_mutex_init(&ctx->lock); 592 timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); 593 594 ctx->poll_ns = 0; 595 ctx->poll_max_ns = 0; 596 ctx->poll_grow = 0; 597 ctx->poll_shrink = 0; 598 599 ctx->aio_max_batch = 0; 600 601 ctx->thread_pool_min = 0; 602 ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT; 603 604 register_aiocontext(ctx); 605 606 return ctx; 607 fail: 608 g_source_destroy(&ctx->source); 609 return NULL; 610 } 611 612 void aio_co_schedule(AioContext *ctx, Coroutine *co) 613 { 614 trace_aio_co_schedule(ctx, co); 615 const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL, 616 __func__); 617 618 if (scheduled) { 619 fprintf(stderr, 620 "%s: Co-routine was already scheduled in '%s'\n", 621 __func__, scheduled); 622 abort(); 623 } 624 625 /* The coroutine might run and release the last ctx reference before we 626 * invoke qemu_bh_schedule(). Take a reference to keep ctx alive until 627 * we're done. 
628 */ 629 aio_context_ref(ctx); 630 631 QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines, 632 co, co_scheduled_next); 633 qemu_bh_schedule(ctx->co_schedule_bh); 634 635 aio_context_unref(ctx); 636 } 637 638 typedef struct AioCoRescheduleSelf { 639 Coroutine *co; 640 AioContext *new_ctx; 641 } AioCoRescheduleSelf; 642 643 static void aio_co_reschedule_self_bh(void *opaque) 644 { 645 AioCoRescheduleSelf *data = opaque; 646 aio_co_schedule(data->new_ctx, data->co); 647 } 648 649 void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx) 650 { 651 AioContext *old_ctx = qemu_get_current_aio_context(); 652 653 if (old_ctx != new_ctx) { 654 AioCoRescheduleSelf data = { 655 .co = qemu_coroutine_self(), 656 .new_ctx = new_ctx, 657 }; 658 /* 659 * We can't directly schedule the coroutine in the target context 660 * because this would be racy: The other thread could try to enter the 661 * coroutine before it has yielded in this one. 662 */ 663 aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data); 664 qemu_coroutine_yield(); 665 } 666 } 667 668 void aio_co_wake(Coroutine *co) 669 { 670 AioContext *ctx; 671 672 /* Read coroutine before co->ctx. Matches smp_wmb in 673 * qemu_coroutine_enter. 674 */ 675 smp_read_barrier_depends(); 676 ctx = qatomic_read(&co->ctx); 677 678 aio_co_enter(ctx, co); 679 } 680 681 void aio_co_enter(AioContext *ctx, Coroutine *co) 682 { 683 if (ctx != qemu_get_current_aio_context()) { 684 aio_co_schedule(ctx, co); 685 return; 686 } 687 688 if (qemu_in_coroutine()) { 689 Coroutine *self = qemu_coroutine_self(); 690 assert(self != co); 691 QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next); 692 } else { 693 aio_context_acquire(ctx); 694 qemu_aio_coroutine_enter(ctx, co); 695 aio_context_release(ctx); 696 } 697 } 698 699 void aio_context_ref(AioContext *ctx) 700 { 701 g_source_ref(&ctx->source); 702 } 703 704 void aio_context_unref(AioContext *ctx) 705 { 706 g_source_unref(&ctx->source); 707 } 708 709 void aio_context_acquire(AioContext *ctx) 710 { 711 qemu_rec_mutex_lock(&ctx->lock); 712 } 713 714 void aio_context_release(AioContext *ctx) 715 { 716 qemu_rec_mutex_unlock(&ctx->lock); 717 } 718 719 QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext) 720 721 AioContext *qemu_get_current_aio_context(void) 722 { 723 AioContext *ctx = get_my_aiocontext(); 724 if (ctx) { 725 return ctx; 726 } 727 if (qemu_mutex_iothread_locked()) { 728 /* Possibly in a vCPU thread. */ 729 return qemu_get_aio_context(); 730 } 731 return NULL; 732 } 733 734 void qemu_set_current_aio_context(AioContext *ctx) 735 { 736 assert(!get_my_aiocontext()); 737 set_my_aiocontext(ctx); 738 } 739 740 void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min, 741 int64_t max, Error **errp) 742 { 743 744 if (min > max || !max || min > INT_MAX || max > INT_MAX) { 745 error_setg(errp, "bad thread-pool-min/thread-pool-max values"); 746 return; 747 } 748 749 ctx->thread_pool_min = min; 750 ctx->thread_pool_max = max; 751 752 if (ctx->thread_pool) { 753 thread_pool_update_params(ctx->thread_pool, ctx); 754 } 755 } 756