/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd
     * readiness from userspace should use aio_poll_disabled() here.  This
     * ensures that file descriptors are not starved by handlers that
     * frequently make progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
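/*
 * Usage sketch (illustrative only, not part of this header's API): a driver
 * typically embeds BlockAIOCB as the first member of its own request struct,
 * describes it with an AIOCBInfo, and allocates instances with
 * qemu_aio_get().  The names MyAIOCB, my_aiocb_info and my_aio_submit are
 * hypothetical.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;       // must be the first member
 *         int my_state;            // driver-specific request state
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     static BlockAIOCB *my_aio_submit(BlockDriverState *bs,
 *                                      BlockCompletionFunc *cb, void *opaque)
 *     {
 *         MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *
 *         acb->my_state = 0;
 *         return &acb->common;     // dropped later with qemu_aio_unref()
 *     }
 */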
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removals
     * of nodes and edges from the block graph while some
     * other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect between QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event-loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
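/*
 * Usage sketch (illustrative only): create a context, run its event loop
 * from the owning thread, then release it.  The "done" flag and the error
 * handling are hypothetical and up to the caller; error_report_err() is
 * declared in "qapi/error.h".
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);    // block until at least one event is ready
 *     }
 *     aio_context_release(ctx);
 *
 *     aio_context_unref(ctx);
 */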
/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * A convenience wrapper for aio_bh_new_full() that uses cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.  Note that
 * multiple calls to aio_bh_poll() cannot run concurrently.
 */
int aio_bh_poll(AioContext *ctx);
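/*
 * Usage sketch (illustrative only): the typical pattern is to allocate a
 * bottom half up front, schedule it whenever work becomes pending, and
 * delete it during teardown.  One-off work can instead be deferred with
 * aio_bh_schedule_oneshot().  MyDev, my_dev_cb and dev->bh are hypothetical
 * names used only for this example.
 *
 *     static void my_dev_cb(void *opaque)
 *     {
 *         MyDev *dev = opaque;
 *         // ... process pending work for dev ...
 *     }
 *
 *     dev->bh = aio_bh_new(ctx, my_dev_cb, dev);   // allocate once
 *     ...
 *     qemu_bh_schedule(dev->bh);                   // wake the event loop
 *     ...
 *     qemu_bh_delete(dev->bh);                     // on teardown
 *
 *     // or, for one-off work:
 *     aio_bh_schedule_oneshot(ctx, my_dev_cb, dev);
 */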
/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is actually removed and
 * freed later, from within the event loop.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * AIO requests as a result of executing I/O completion or bottom half
 * callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
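/*
 * Usage sketch (illustrative only): make a file descriptor readable to this
 * AioContext, then remove the registration by passing all-NULL handlers.
 * MyDev, my_fd_read and dev are hypothetical; no userspace polling hooks are
 * installed here (io_poll and io_poll_ready are NULL), and is_external is
 * set to false purely for illustration.
 *
 *     static void my_fd_read(void *opaque)
 *     {
 *         MyDev *dev = opaque;
 *         // ... drain the fd and process the event ...
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, false,
 *                        my_fd_read, NULL,    // io_read, io_write
 *                        NULL, NULL,          // io_poll, io_poll_ready
 *                        dev);
 *     ...
 *     aio_set_fd_handler(ctx, fd, false,
 *                        NULL, NULL, NULL, NULL, NULL);   // unregister
 */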
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use those unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
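/*
 * Usage sketch (illustrative only): allocate a timer on this context's
 * QEMU_CLOCK_REALTIME timer list and arm it to fire 100 ms from now.
 * MyDev, my_timer_cb and dev are hypothetical; timer_mod(), timer_del(),
 * timer_free() and qemu_clock_get_ms() come from "qemu/timer.h".
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyDev *dev = opaque;
 *         // ... handle timer expiry ...
 *     }
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, dev);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);       // disarm if still pending
 *     timer_free(t);
 */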
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);
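/*
 * Usage sketch (illustrative only): create a coroutine and hand it to an
 * IOThread's AioContext.  my_co_fn and iothread_ctx are hypothetical names;
 * qemu_coroutine_create() comes from the coroutine API ("qemu/coroutine.h").
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         // ... runs in iothread_ctx's event loop; may yield and later be
 *         // ... restarted with aio_co_wake() ...
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, opaque);
 *     aio_co_schedule(iothread_ctx, co);
 */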
/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif