/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"


typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
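
/*
 * Example (illustrative sketch, not part of the original header): a driver
 * that implements asynchronous requests typically embeds BlockAIOCB as the
 * first field of its own request structure and describes it with an
 * AIOCBInfo.  The MyAIOCB/my_aio_start names below are hypothetical.
 *
 *   typedef struct MyAIOCB {
 *       BlockAIOCB common;          // must be the first field
 *       int ret;                    // driver-specific state follows
 *   } MyAIOCB;
 *
 *   static const AIOCBInfo my_aiocb_info = {
 *       .aiocb_size = sizeof(MyAIOCB),
 *   };
 *
 *   BlockAIOCB *my_aio_start(BlockDriverState *bs,
 *                            BlockCompletionFunc *cb, void *opaque)
 *   {
 *       MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *       // ... submit the request; on completion call acb->common.cb()
 *       // and drop the reference with qemu_aio_unref(acb).
 *       return &acb->common;
 *   }
 */
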
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
typedef struct LuringState LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
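
/*
 * Illustrative sketch (not part of the original header): the shape of a
 * file descriptor monitoring implementation.  The fdmon_example_* names are
 * hypothetical; the real implementations live elsewhere in the tree.
 *
 *   static void fdmon_example_update(AioContext *ctx, AioHandler *old_node,
 *                                    AioHandler *new_node)
 *   {
 *       // add, remove or modify the monitored fd in the backend
 *   }
 *
 *   static int fdmon_example_wait(AioContext *ctx, AioHandlerList *ready_list,
 *                                 int64_t timeout)
 *   {
 *       // block for up to @timeout ns, queue ready handlers on @ready_list
 *       return 0;  // number of ready file descriptors
 *   }
 *
 *   static const FDMonOps fdmon_example_ops = {
 *       .update    = fdmon_example_update,
 *       .wait      = fdmon_example_wait,
 *       .need_wait = aio_poll_disabled,  // cannot poll readiness from userspace
 *   };
 */
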
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removals
     * of nodes and edges from the block graph while some
     * other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect between QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);
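
/*
 * Example (illustrative sketch, not part of the original header): creating
 * and releasing a standalone AioContext.  Passing &error_abort is just one
 * error-handling option.
 *
 *   AioContext *ctx = aio_context_new(&error_abort);
 *
 *   // ... register handlers, bottom halves and timers, run aio_poll() ...
 *
 *   aio_context_unref(ctx);   // drop the initial reference when done
 */
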
/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 * device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that multiple calls to aio_bh_poll() cannot run concurrently.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);
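
/*
 * Example (illustrative sketch, not part of the original header): a typical
 * bottom half lifecycle.  The my_bh_cb/MyState names are hypothetical.
 *
 *   static void my_bh_cb(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       // runs from the AioContext's event loop, outside the caller's stack
 *   }
 *
 *   // setup
 *   s->bh = aio_bh_new(ctx, my_bh_cb, s);
 *
 *   // from any thread: request that my_bh_cb runs as soon as possible
 *   qemu_bh_schedule(s->bh);
 *
 *   // teardown: cancel if still pending and free the bottom half
 *   qemu_bh_delete(s->bh);
 *
 * For a callback that only ever needs to run once, the allocation can be
 * avoided with aio_bh_schedule_oneshot(ctx, my_bh_cb, s).
 */
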
/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only freed once the event
 * loop is finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress on pending AIO work.  This can issue new AIO requests as a
 * result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
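
/*
 * Example (illustrative sketch, not part of the original header): monitor a
 * file descriptor for readability and later remove the handler again.  The
 * my_fd_read/MyState names are hypothetical.
 *
 *   static void my_fd_read(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       // read from s->fd; called from the AioContext's event loop
 *   }
 *
 *   // register: only the read handler is needed here, polling callbacks unused
 *   aio_set_fd_handler(ctx, s->fd, my_fd_read, NULL, NULL, NULL, s);
 *
 *   // unregister: pass NULL for all callbacks
 *   aio_set_fd_handler(ctx, s->fd, NULL, NULL, NULL, NULL, NULL);
 */
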
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);

/*
 * Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 *
 * Note that if the io_poll_end() callback (or the entire notifier) is removed
 * during polling, it will not be called, so an io_poll_begin() is not
 * necessarily always followed by an io_poll_end().
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
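
/*
 * Example (illustrative sketch, not part of the original header): arm a
 * dynamically allocated timer on an AioContext.  The my_timer_cb name is
 * hypothetical; timer_mod(), timer_free() and qemu_clock_get_ms() come from
 * the timer API declared in "qemu/timer.h", which is included above.
 *
 *   static void my_timer_cb(void *opaque)
 *   {
 *       // runs from ctx's event loop when the timer expires
 *   }
 *
 *   QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                my_timer_cb, s);
 *   timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);  // 100 ms
 *   ...
 *   timer_free(t);
 */
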
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 *
 * Note that this function cannot reschedule from iohandler_ctx to
 * qemu_aio_context.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);
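
/*
 * Example (illustrative sketch, not part of the original header): a coroutine
 * yields while waiting for an event and is later woken from another thread.
 * The wait_for_event/MyState names are hypothetical; qemu_coroutine_self()
 * and qemu_coroutine_yield() belong to the coroutine API.
 *
 *   static void coroutine_fn wait_for_event(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       s->waiter = qemu_coroutine_self();
 *       qemu_coroutine_yield();          // sleep until woken
 *       // ... the event has happened ...
 *   }
 *
 *   // from another thread, once the event is ready:
 *   aio_co_wake(s->waiter);              // resumes the coroutine in the
 *                                        // AioContext it last ran in
 */
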
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 *
 * Note that this never returns the main loop's iohandler_ctx; the main loop
 * AioContext is returned instead.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif