/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
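
/*
 * Illustrative sketch (not part of this header): a driver typically embeds
 * BlockAIOCB as the first member of its own request structure, describes it
 * with an AIOCBInfo, and allocates it with qemu_aio_get().  All names below
 * are hypothetical.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;      // must be the first member
 *         int my_request_state;   // driver-specific data follows
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     // ... submit the request; on completion call
 *     // acb->common.cb(acb->common.opaque, ret) and then drop the reference:
 *     qemu_aio_unref(acb);
 */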

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
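
/*
 * Illustrative sketch (not part of this header): a monitoring backend fills in
 * an FDMonOps table that ctx->fdmon_ops points to.  The names below are
 * hypothetical; in QEMU the real backends live in util/fdmon-*.c.
 *
 *     static void my_fdmon_update(AioContext *ctx, AioHandler *old_node,
 *                                 AioHandler *new_node) { ... }
 *     static int my_fdmon_wait(AioContext *ctx, AioHandlerList *ready_list,
 *                              int64_t timeout) { ... }
 *
 *     static const FDMonOps fdmon_my_ops = {
 *         .update = my_fdmon_update,
 *         .wait = my_fdmon_wait,
 *         // a backend that cannot check fd readiness from userspace points
 *         // need_wait at aio_poll_disabled(), as described above
 *         .need_wait = aio_poll_disabled,
 *     };
 */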

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid adding or removing nodes and edges
     * of the block graph while some other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect between QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event-loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
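
/*
 * Illustrative sketch (not part of this header): a thread that wants to drive
 * another AioContext's event loop without being interrupted takes ownership
 * around aio_poll().  "done" is a hypothetical completion condition.
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_release(ctx);
 */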

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
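
/*
 * Illustrative sketch (not part of this header): scheduling a one-shot bottom
 * half.  The callback and opaque pointer names are hypothetical.
 *
 *     static void my_oneshot_cb(void *opaque)
 *     {
 *         // runs once in ctx's home thread; the BH is then freed
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_oneshot_cb, my_state);
 */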

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently
 * on the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are deferred
 * to the next aio_bh_poll() call (see the BHListSlice comment above); a
 * handler that keeps re-scheduling itself can therefore still monopolize
 * the event loop.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the memory is only freed once the event loop
 * has finished with the bottom half.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
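
/*
 * Illustrative sketch (not part of this header): typical lifecycle of a
 * persistent bottom half.  The callback and state names are hypothetical.
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // ... do deferred work in ctx's home thread ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, s);   // allocate once
 *     qemu_bh_schedule(bh);       // thread-safe; run the callback soon
 *     ...
 *     qemu_bh_delete(bh);         // cancel if pending, then free
 */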

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
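
/*
 * Illustrative sketch (not part of this header): waiting synchronously for a
 * condition by driving the event loop, as QEMU's nested event loops commonly
 * do.  "request_done" is a hypothetical flag set by a completion callback.
 *
 *     while (!request_done) {
 *         aio_poll(ctx, true);   // block until at least one event is handled
 *     }
 */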

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
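
/*
 * Illustrative sketch (not part of this header): monitoring a socket for
 * readability.  The handler, fd and state names are hypothetical.
 *
 *     static void my_read_handler(void *opaque)
 *     {
 *         // called from ctx's event loop when my_fd is readable
 *     }
 *
 *     aio_set_fd_handler(ctx, my_fd, true, my_read_handler,
 *                        NULL, NULL, NULL, my_state);
 *
 *     // passing NULL callbacks later removes the handler again
 *     aio_set_fd_handler(ctx, my_fd, true, NULL, NULL, NULL, NULL, NULL);
 */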

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);
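
/*
 * Illustrative sketch (not part of this header): attaching a handler to an
 * EventNotifier, e.g. one used for device kicks.  Names are hypothetical.
 *
 *     static void my_notifier_read(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);   // consume the event
 *         // ... process whatever the notifier signals ...
 *     }
 *
 *     aio_set_event_notifier(ctx, &my_notifier, false,
 *                            my_notifier_read, NULL, NULL);
 */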

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Set up the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Set up the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);
/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}
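
/*
 * Illustrative sketch (not part of this header): arming a timer on an
 * AioContext.  timer_mod() and qemu_clock_get_ns() come from qemu/timer.h;
 * the callback and state names are hypothetical.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         // fires in ctx's home thread once the deadline passes
 *     }
 *
 *     QEMUTimer timer;
 *     aio_timer_init(ctx, &timer, QEMU_CLOCK_REALTIME, SCALE_NS,
 *                    my_timer_cb, my_state);
 *     timer_mod(&timer, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + 1000000);
 */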

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
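
/*
 * Illustrative sketch (not part of this header): disable/enable calls nest via
 * the external_disable_cnt counter, so they must always be paired.
 *
 *     aio_disable_external(ctx);
 *     // ... handlers registered with is_external == true are not polled ...
 *     aio_enable_external(ctx);   // re-arms fds once the count drops to zero
 */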

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment. True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx. If the coroutine is already
 * running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);
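
/*
 * Illustrative sketch (not part of this header): handing a request off to an
 * IOThread's AioContext by scheduling a coroutine there, then waking a waiter.
 * The coroutine entry point and the MyRequest type are hypothetical.
 *
 *     static void coroutine_fn my_request_co(void *opaque)
 *     {
 *         MyRequest *req = opaque;
 *         // ... do the I/O, possibly yielding ...
 *         aio_co_wake(req->waiter);   // resume the waiter on its own context
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_request_co, req);
 *     aio_co_schedule(iothread_ctx, co);   // runs in iothread_ctx's loop
 */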

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
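
/*
 * Illustrative sketch (not part of this header): enabling adaptive busy
 * polling with a 32 microsecond cap, or disabling it entirely.  The growth
 * and shrink factors are illustrative values; &error_abort is used for
 * brevity.
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 *     aio_context_set_poll_params(ctx, 0, 0, 0, &error_abort);   // disable
 */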

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif