/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

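/*
 * Example (illustrative sketch, not part of this header): a block driver
 * typically embeds BlockAIOCB in its own AIOCB structure and allocates it
 * with qemu_aio_get().  The names MyAIOCB and my_aiocb_info below are
 * hypothetical.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;
 *         int my_state;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     // ... submit the request; when it completes: ...
 *     acb->common.cb(acb->common.opaque, 0);
 *     qemu_aio_unref(acb);
 */
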
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent adders and deleters of QEMUBHs and
     * AioHandlers, and ensures that no callbacks are removed while we are
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

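/*
 * Minimal usage sketch (illustrative; in_flight is a hypothetical counter
 * maintained by the caller): a thread that shares @ctx takes ownership
 * around its calls to aio_poll() while waiting for work to finish.
 *
 *     aio_context_acquire(ctx);
 *     while (in_flight > 0) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_release(ctx);
 */
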
/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

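/*
 * Illustrative sketch (my_cb and my_state are hypothetical): the callback
 * runs once in @ctx's event loop and the bottom half is then freed
 * automatically.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         // ... runs in ctx's thread ...
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */
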
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

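/*
 * Typical lifecycle, as a minimal sketch (my_bh_cb and my_state are
 * hypothetical): the bottom half is created once, scheduled any number of
 * times, and finally deleted.
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *
 *     qemu_bh_schedule(bh);    // my_bh_cb(my_state) runs in ctx's loop
 *     ...
 *     qemu_bh_delete(bh);      // cancels if pending, frees asynchronously
 */
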
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently from
 * multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are not
 * invoked until the next aio_bh_poll() call (see the BHListSlice comment
 * above); a handler that keeps rescheduling itself will nevertheless keep
 * the event loop permanently busy.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the memory is not freed immediately, but
 * only once the event loop has finished with the bottom half.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress on pending AIO work.  This can issue new AIO requests as a
 * result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * when someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

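/*
 * Illustrative sketch (request_done is a hypothetical flag set by the
 * caller's completion callback): a non-blocking call processes whatever is
 * already pending, while a blocking call sleeps until at least one handler,
 * bottom half or timer has run.
 *
 *     aio_poll(ctx, false);           // poll once, never sleep
 *
 *     while (!request_done) {
 *         aio_poll(ctx, true);        // sleep until progress is made
 *     }
 */
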
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);

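/*
 * Illustrative sketch (my_fd_read and my_state are hypothetical): register a
 * read handler for a file descriptor, and later unregister it by passing NULL
 * callbacks.  Passing is_external == true marks the handler as an external
 * client, so it is skipped while aio_disable_external() is in effect.
 *
 *     static void my_fd_read(void *opaque)
 *     {
 *         // ... fd is readable ...
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, true, my_fd_read, NULL, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
 */
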
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);

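/*
 * Illustrative sketch (my_notifier_read is hypothetical): the handler is
 * expected to clear the notifier itself before handling the event.
 *
 *     static void my_notifier_read(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // ... handle the event ...
 *     }
 *
 *     event_notifier_init(&notifier, 0);
 *     aio_set_event_notifier(ctx, &notifier, false, my_notifier_read, NULL);
 *     ...
 *     event_notifier_set(&notifier);   // e.g. from another thread
 */
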
/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

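/*
 * Minimal sketch of driving an AioContext from a GLib main loop: the
 * returned source comes with a reference that can be dropped once it has
 * been attached.
 *
 *     GSource *source = aio_get_g_source(ctx);
 *     g_source_attach(source, g_main_context_default());
 *     g_source_unref(source);
 */
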
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Set up the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Set up the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interfaces are aio_timer_init and aio_timer_init_with_attrs;
 * use those unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

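/*
 * Illustrative sketch (my_timer_cb and my_state are hypothetical): arm a
 * millisecond-scale timer on @ctx's timer list, then tear it down.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         // ... runs in ctx's thread when the timer expires ...
 *     }
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */
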
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = atomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a node with the given is_external flag may be polled by
 * @ctx at this moment.  Returns true if polling it is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

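/*
 * Illustrative sketch (my_co_fn and MyState are hypothetical): a coroutine
 * records itself, yields, and is later resumed in its original context by
 * aio_co_wake() from another thread.
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         MyState *s = opaque;
 *
 *         s->co = qemu_coroutine_self();
 *         qemu_coroutine_yield();          // sleep until woken
 *         // ... resumed here by aio_co_wake(s->co) ...
 *     }
 *
 *     // elsewhere, e.g. from a completion callback in another thread:
 *     aio_co_wake(s->co);
 */
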
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx.  Note
 * that acquiring/releasing ctx does not affect the outcome; each AioContext
 * still has only one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

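/*
 * Illustrative sketch (the numbers are arbitrary, not recommended defaults):
 * allow up to 32 microseconds of busy polling with growth and shrink factors
 * of 2.
 *
 *     Error *local_err = NULL;
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */
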
#endif