/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
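
/*
 * Example of the usual driver pattern (a minimal sketch; MyAIOCB and
 * my_aio_cancel are hypothetical):
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;
 *         ...
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size   = sizeof(MyAIOCB),
 *         .cancel_async = my_aio_cancel,
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     ...
 *     acb->common.cb(acb->common.opaque, 0);
 *     qemu_aio_unref(acb);
 */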

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
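
/*
 * A monitoring backend is wired up by filling in an FDMonOps table, e.g.
 * (a hypothetical sketch; the real poll(2)/epoll(7)/io_uring backends live
 * in util/fdmon-*.c):
 *
 *     static const FDMonOps fdmon_example_ops = {
 *         .update    = fdmon_example_update,
 *         .wait      = fdmon_example_wait,
 *         .need_wait = aio_poll_disabled,
 *     };
 */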

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent QEMUBH and AioHandler adders and
     * deleters, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
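
/*
 * Example (a minimal sketch, using the usual Error API):
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ...
 *     aio_context_unref(ctx);
 */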

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
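
/*
 * Typical usage from a thread other than the context's home thread
 * (a minimal sketch):
 *
 *     aio_context_acquire(ctx);
 *     ... access state protected by the AioContext ...
 *     aio_context_release(ctx);
 */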

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
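
/*
 * Example (a minimal sketch; my_cb and my_state are hypothetical):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ...
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */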

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))
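
/*
 * Typical lifecycle of a reusable bottom half (a minimal sketch; my_cb and
 * my_state are hypothetical):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);
 *     ...
 *     qemu_bh_delete(bh);
 *
 * The BH may be scheduled any number of times before qemu_bh_delete()
 * cancels it and releases its memory.
 */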

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently from
 * multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 *
 * Deletion is asynchronous: the bottom half is only freed once the event
 * loop has processed the deletion.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or BH callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure that some progress has
 * been made before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
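
/*
 * For example, a caller can wait synchronously for a completion flag that
 * is set by one of its callbacks (a minimal sketch; "done" is hypothetical):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */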

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler.
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
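
/*
 * Example of registering a read handler (a minimal sketch; my_read_handler
 * and MyState are hypothetical):
 *
 *     static void my_read_handler(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ... read from the file descriptor ...
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, false, my_read_handler, NULL, NULL, s);
 *
 * Passing NULL for all handlers unregisters the file descriptor:
 *
 *     aio_set_fd_handler(ctx, fd, false, NULL, NULL, NULL, NULL);
 */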

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
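
/*
 * Example (a minimal sketch; my_notifier_cb is hypothetical):
 *
 *     static void my_notifier_cb(EventNotifier *e)
 *     {
 *         ... handle the wakeup ...
 *     }
 *
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, my_notifier_cb, NULL);
 *
 * Another thread can then wake the handler with event_notifier_set(&e).
 */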

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
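
/*
 * The returned source can be attached to a GLib main context, e.g.
 * (a minimal sketch):
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 */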

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
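
/*
 * Example of a timer firing 100ms from now (a minimal sketch; my_timer_cb
 * and my_state are hypothetical):
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */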

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
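
/*
 * Calls nest: each aio_disable_external() must be paired with an
 * aio_enable_external(), e.g. (a minimal sketch):
 *
 *     aio_disable_external(ctx);
 *     ... external handlers are not dispatched here ...
 *     aio_enable_external(ctx);
 */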

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment. True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
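
/*
 * Example (a minimal sketch; my_co_fn is a hypothetical coroutine_fn):
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_state);
 *     aio_co_schedule(ctx, co);
 */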

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx. If the coroutine is already
 * running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
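
/*
 * For example, enabling adaptive polling with a 32 microsecond cap
 * (a minimal sketch; 32768 ns matches the default IOThread poll-max-ns):
 *
 *     aio_context_set_poll_params(ctx, 32768, 0, 0, &error_abort);
 */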

#endif