/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"


typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
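/*
 * Illustrative sketch (not part of the API): per the need_wait()
 * documentation above, a monitoring backend that cannot poll fd readiness
 * from userspace would wire up its FDMonOps like this.  The
 * fdmon_example_* callbacks are hypothetical:
 *
 *   static const FDMonOps fdmon_example_ops = {
 *       .update    = fdmon_example_update,
 *       .wait      = fdmon_example_wait,
 *       .need_wait = aio_poll_disabled,
 *   };
 */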
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removals
     * of nodes and edges from the block graph while some other
     * thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play it safe and allow it; it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect between QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
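/*
 * Illustrative sketch: a thread that shares an AioContext with other
 * threads takes ownership around its critical section.  submit_requests()
 * is a hypothetical helper:
 *
 *   aio_context_acquire(ctx);
 *   submit_requests(ctx);     // state owned by ctx is safe to touch here
 *   aio_context_release(ctx);
 */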
/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 * device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);
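/*
 * Illustrative sketch of the bottom half lifecycle; my_cb, my_state and
 * MyState are hypothetical:
 *
 *   static void my_cb(void *opaque)
 *   {
 *       struct MyState *s = opaque;
 *       ...
 *   }
 *
 *   QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *   qemu_bh_schedule(bh);   // my_cb(my_state) runs soon in ctx's loop
 *   ...
 *   qemu_bh_delete(bh);     // cancels if still pending, then frees
 */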
/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.  Note that
 * aio_bh_poll() must not be called concurrently from multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are deferred
 * until the next aio_bh_poll() call (see the BHListSlice comment above).
 * A handler that reschedules itself therefore keeps the event loop
 * permanently busy, but cannot loop forever within a single aio_bh_poll()
 * call.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 *
 * This function is asynchronous: the event loop performs the actual
 * deletion once the bottom half is no longer in use.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure progress has been made
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very
 * similarly to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler.
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
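/*
 * Illustrative sketch: register a read handler for @fd, then later remove
 * the handler by passing NULL callbacks (fd_read_cb is hypothetical):
 *
 *   aio_set_fd_handler(ctx, fd, fd_read_cb, NULL, NULL, NULL, opaque);
 *   ...
 *   aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
 */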
/* Register an event notifier and associated callbacks.  Behaves very
 * similarly to event_notifier_set_handler.  Unlike
 * event_notifier_set_handler, these callbacks will be invoked when using
 * aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Set up the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Set up the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function allocates the memory for the timer.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use those unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
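/*
 * Illustrative sketch: arm a dynamically allocated timer that fires 100 ms
 * from now on @ctx's realtime clock (timer_cb is hypothetical; timer_mod
 * and qemu_clock_get_ms come from "qemu/timer.h"):
 *
 *   QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                timer_cb, opaque);
 *   timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */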
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);
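/*
 * Illustrative sketch: create a coroutine and hand it to another context
 * (co_fn and iothread_ctx are hypothetical; qemu_coroutine_create comes
 * from "qemu/coroutine.h"):
 *
 *   Coroutine *co = qemu_coroutine_create(co_fn, opaque);
 *   aio_co_schedule(iothread_ctx, co);   // co_fn runs in iothread_ctx
 */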
void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif