xref: /openbmc/qemu/include/block/aio.h (revision 0b2ff2ce)
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/rfifolock.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

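/* Illustrative sketch (not part of the API): a block driver typically embeds
 * BlockAIOCB as the first member of its own AIOCB type, describes it with a
 * static AIOCBInfo, and allocates instances through qemu_aio_get().  The
 * names below (MyAIOCB, my_aiocb_info, my_cancel_async) are hypothetical.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;      // must be the first member
 *         int my_state;           // driver-specific fields follow
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size   = sizeof(MyAIOCB),
 *         .cancel_async = my_cancel_async,
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     // ... start the request; on completion call
 *     // acb->common.cb(acb->common.opaque, ret) and drop the reference
 *     // with qemu_aio_unref(acb).
 */
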
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
     * Writes protected by lock or BQL, reads are lockless.
     */
    bool dispatching;
    /* Lock that protects against concurrent addition and deletion of
     * bottom halves on the first_bh list.
     */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used for aio_notify.  */
    EventNotifier notifier;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/* Used internally to synchronize aio_poll against qemu_bh_schedule.  */
void aio_set_dispatching(AioContext *ctx, bool dispatching);

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

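/* Illustrative sketch (not part of this header): creating a context and
 * dropping the reference when done.  Error handling is only outlined here.
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         // report or propagate local_err as appropriate
 *     }
 *     ...
 *     aio_context_unref(ctx);
 */
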
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

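/* Illustrative sketch (not part of this header): a thread that shares the
 * context with others takes ownership around a blocking aio_poll() loop.
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_release(ctx);
 *
 * This mirrors how a dedicated iothread drives its AioContext; single-threaded
 * users can rely on aio_poll() acquiring and releasing the context itself.
 */
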
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

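/* Illustrative sketch (not part of this header): typical bottom half
 * lifecycle.  The callback and opaque value below are hypothetical.
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         // runs in the context that owns the bottom half
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);       // thread-safe, may be called from any thread
 *     ...
 *     qemu_bh_delete(bh);         // cancel (if pending) and free
 */
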
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only unlinked and freed
 * once the dispatch code is finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

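/* Illustrative sketch (not part of this header): synchronously waiting for an
 * asynchronous request to finish by driving the event loop.  The "done" flag
 * is hypothetical and would be set by the request's completion callback.
 *
 *     while (!done) {
 *         aio_poll(ctx, true);    // block until at least one event fires
 *     }
 */
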
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);

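/* Illustrative sketch (not part of this header): watching a socket for
 * readability, then removing the handler again.  my_read_cb is hypothetical.
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         // called from aio_poll()/the GSource when the fd is readable
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL);   // unregister
 */
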
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

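/* Illustrative sketch (not part of this header): hooking an EventNotifier
 * (e.g. an eventfd used as a kick) into the context.  my_notifier_cb is
 * hypothetical; passing NULL as the handler unregisters the notifier.
 *
 *     event_notifier_init(&my_notifier, 0);
 *     aio_set_event_notifier(ctx, &my_notifier, my_notifier_cb);
 *     ...
 *     aio_set_event_notifier(ctx, &my_notifier, NULL);
 */
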
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init. Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

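/* Illustrative sketch (not part of this header): a one-shot timer firing
 * roughly 100 ms from now on the context's realtime clock.  my_timer_cb is
 * hypothetical.
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */
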
/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

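/* Illustrative sketch (not part of this header): the preferred, allocation-free
 * variant with the QEMUTimer embedded in the caller's own state structure.
 * MyState, my_timer_cb and delay_ns are hypothetical.
 *
 *     typedef struct MyState {
 *         QEMUTimer timer;
 *         ...
 *     } MyState;
 *
 *     aio_timer_init(ctx, &s->timer, QEMU_CLOCK_VIRTUAL, SCALE_NS,
 *                    my_timer_cb, s);
 *     timer_mod(&s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
 */
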
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

#endif