/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/rfifolock.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_check and the next call to aio_ctx_dispatch.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* Lock to protect concurrent adders and the deleter of bottom halves */
    QemuMutex bh_lock;

    /* Anchor of the list of bottom halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContext provides a mini event loop that can be waited on synchronously.
 * It also provides bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
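
/* A minimal usage sketch of the bottom half lifecycle (illustrative only,
 * not part of this API; "my_cb", "MyState" and "s" are hypothetical
 * caller-defined names):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->done = true;       // runs in the AioContext's thread
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);     // wait-free; safe even from signal handlers
 *     ...
 *     qemu_bh_delete(bh);       // cancels if still pending, then frees
 */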

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * by the handler registered with aio_set_event_notifier(), because it
 * must happen before processing any events (file descriptors, bottom
 * halves, timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently with itself
 * on the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only removed and freed
 * during a subsequent pass over the bottom half list.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure that progress has been
 * made before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);
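
/* A usage sketch for the two functions above (illustrative only; "s",
 * "s->fd", "s->request_complete" and "sock_read_cb" are hypothetical
 * caller-defined names): register a read handler for a socket, then
 * wait synchronously until the handler signals completion.
 *
 *     aio_set_fd_handler(ctx, s->fd, sock_read_cb, NULL, s);
 *
 *     while (!s->request_complete) {
 *         aio_poll(ctx, true);      // blocks until a handler, BH or timer ran
 *     }
 *
 *     // Passing NULL handlers unregisters the file descriptor again.
 *     aio_set_fd_handler(ctx, s->fd, NULL, NULL, NULL);
 */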

/* Register an event notifier and associated callbacks.  Behaves very
 * similarly to event_notifier_set_handler.  Unlike event_notifier_set_handler,
 * these callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
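
/* A usage sketch (illustrative only; "timer_cb" and "opaque" are
 * hypothetical caller-supplied names): arm a one-shot timer on this
 * context that fires roughly 100 ms from now.
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  timer_cb, opaque);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);             // disarm
 *     timer_free(t);            // release the allocation
 */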

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

#endif