/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/rfifolock.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_check and the next call to aio_ctx_dispatch.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* Lock that protects concurrent addition and deletion of bottom halves */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Scheduling this BH forces the event loop to iterate */
    QEMUBH *notify_dummy_bh;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event-loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired the AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
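/* Example: a minimal event-loop thread that owns its context.  This is an
 * illustrative sketch, not a prescribed pattern: the "running" flag is
 * hypothetical, error handling is omitted, and &error_abort simply aborts
 * on allocation failure.
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *
 *     while (running) {
 *         aio_context_acquire(ctx);
 *         aio_poll(ctx, true);          // block until progress can be made
 *         aio_context_release(ctx);
 *     }
 *
 *     aio_context_unref(ctx);           // drop the aio_context_new reference
 */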
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);
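/* Example: a one-shot bottom half.  An illustrative sketch; "my_cb",
 * "my_state" and "MyState" are hypothetical, and qemu_bh_schedule() /
 * qemu_bh_delete() are documented further down in this header.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;          // MyState is a hypothetical type
 *         ...                           // runs in ctx's event loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);             // wakes up the loop via aio_notify
 *     ...
 *     qemu_bh_delete(bh);               // cancel if pending, then free
 */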
/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_set_event_notifier(), because it must be done
 * before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion completes asynchronously: the bottom half is only freed once
 * the event loop has finished iterating over the bottom half list.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * AIO operations as a result of executing I/O completion or BH callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);
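/* Example: watching a file descriptor for readability.  An illustrative
 * sketch; "my_read_cb" is hypothetical, error handling is omitted, and the
 * descriptor is treated here as an external client (is_external == true).
 * Passing NULL for both handlers unregisters the descriptor.
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         int *fdp = opaque;
 *         char buf[64];
 *         read(*fdp, buf, sizeof(buf));            // consume the data
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, true, my_read_cb, NULL, &fd);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL);    // unregister
 */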
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
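/* Example: arming a caller-allocated timer on this context.  An illustrative
 * sketch; "my_timer_cb" is hypothetical.  timer_mod() takes an expiry time
 * expressed in the timer's scale, here milliseconds.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         ...                                      // timer expired
 *     }
 *
 *     QEMUTimer t;
 *     aio_timer_init(ctx, &t, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timer_cb, NULL);
 *     timer_mod(&t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(&t);                               // stop it before freeing
 */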
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    assert(ctx->external_disable_cnt > 0);
    atomic_dec(&ctx->external_disable_cnt);
}

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the processing of external clients is disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a node with the given is_external flag may be polled by
 * @ctx at this moment.  Return true if polling it is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx, Error **errp);

#endif