/* xref: /openbmc/qemu/util/async.c (revision 5710a3e0) */
/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};
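
/*
 * A worked example of the flag transitions for a one-shot bottom half:
 * aio_bh_schedule_oneshot() enqueues it with BH_PENDING | BH_SCHEDULED |
 * BH_ONESHOT; aio_bh_dequeue() then clears BH_PENDING and BH_SCHEDULED
 * (leaving BH_ONESHOT set), so aio_bh_poll() invokes the callback and
 * frees the QEMUBH.
 */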

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in atomic_fetch_or makes sure that:
     * 1. Idle flags and any writes needed by the callback are done before
     *    those locations are read in aio_bh_poll().
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = atomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The atomic_and is paired with aio_bh_enqueue().  The implicit memory
     * barrier ensures that the callback sees all writes done by the scheduling
     * thread.  It also ensures that the scheduling thread sees the cleared
     * flag before bh->cb has run, and thus will call aio_notify again if
     * necessary.
     */
    *flags = atomic_fetch_and(&bh->flags,
                              ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}
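
/*
 * A minimal usage sketch for the one-shot API; the callback name and the
 * counter payload are illustrative assumptions, not part of this file.
 */
static void example_oneshot_cb(void *opaque)
{
    int *counter = opaque;
    (*counter)++;   /* runs once in ctx's thread, then the BH is freed */
}

static void example_schedule_oneshot(AioContext *ctx, int *counter)
{
    /* No QEMUBH handle to manage: aio_bh_poll() frees it after the call */
    aio_bh_schedule_oneshot(ctx, example_oneshot_cb, counter);
}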

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    return bh;
}
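
/*
 * A sketch of the long-lived bottom half pattern, assuming a hypothetical
 * device structure that owns its QEMUBH for the device's whole lifetime.
 */
typedef struct ExampleDevice {
    AioContext *ctx;
    QEMUBH *bh;
} ExampleDevice;

static void example_device_cb(void *opaque)
{
    /* Runs in the AioContext's thread each time the BH is scheduled */
}

static void example_device_init(ExampleDevice *dev, AioContext *ctx)
{
    dev->ctx = ctx;
    dev->bh = aio_bh_new(ctx, example_device_cb, dev);
    qemu_bh_schedule(dev->bh);   /* may be called from any thread */
}

static void example_device_fini(ExampleDevice *dev)
{
    qemu_bh_delete(dev->bh);     /* freed later by aio_bh_poll() */
}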

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple instances of aio_bh_poll() must not run concurrently. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}
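
/*
 * Because aio_bh_poll() snapshots ctx->bh_list into a slice up front, a
 * callback that re-schedules its own BH (a plausible polling pattern,
 * sketched below with the hypothetical ExampleDevice from above) is
 * deferred to the next aio_bh_poll() call instead of spinning inside the
 * current one.
 */
static void example_repolling_cb(void *opaque)
{
    ExampleDevice *dev = opaque;
    qemu_bh_schedule(dev->bh);   /* lands on ctx->bh_list, not this slice */
}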

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is async. */
void qemu_bh_cancel(QEMUBH *bh)
{
    atomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is async.  The bottom half is actually deleted later, by
 * aio_bh_poll() or aio_ctx_finalize().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
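
/*
 * A worked example: with one idle BH pending, aio_compute_bh_timeout()
 * yields 10000000ns (10ms); if the soonest timer in ctx->tlg fires in
 * 5000000ns, the result is qemu_soonest_timeout(10000000, 5000000) =
 * 5000000ns.  Any non-idle scheduled BH short-circuits the whole
 * computation to 0.
 */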

static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(flags & BH_DELETED);

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
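
/*
 * A sketch of offloading blocking work, assuming the thread_pool_submit_aio()
 * interface from block/thread-pool.h; the worker and completion callbacks
 * are illustrative.
 */
static int example_blocking_work(void *opaque)
{
    return 0;   /* runs in a pool thread; the return value reaches the cb */
}

static void example_work_done(void *opaque, int ret)
{
    /* runs back in the AioContext's thread */
}

static void example_offload(AioContext *ctx, void *job)
{
    ThreadPool *pool = aio_get_thread_pool(ctx);   /* lazily created */
    thread_pool_submit_aio(pool, example_blocking_work, job,
                           example_work_done, job);
}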

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->flags before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (atomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)
#ifdef WIN32
        || true
#endif
    ) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
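
/*
 * The notification protocol in one trace: a scheduling thread runs
 * aio_bh_enqueue() -> aio_notify(), whose smp_mb() pairs with the one in
 * aio_ctx_prepare(); if the poller had already published notify_me, the
 * event notifier is set and ctx->notified becomes true.  The poller later
 * observes the wakeup, and aio_notify_accept() consumes it by clearing
 * ctx->notified and the event notifier before callbacks are dispatched.
 */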

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool event_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return atomic_read(&ctx->notified);
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        atomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           event_notifier_dummy_cb,
                           event_notifier_poll);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
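
/*
 * A minimal creation sketch: error handling via Error, then the GSource is
 * attached to a GLib main context (NULL means the default one here).
 */
static AioContext *example_new_attached_context(Error **errp)
{
    AioContext *ctx = aio_context_new(errp);
    GSource *src;

    if (!ctx) {
        return NULL;
    }
    src = aio_get_g_source(ctx);   /* takes a reference */
    g_source_attach(src, NULL);
    g_source_unref(src);           /* the main context holds its own */
    return ctx;
}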

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL,
                                           __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = atomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}
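
/*
 * A sketch of the cross-thread wakeup path: a hypothetical entry point is
 * wrapped in a coroutine and handed to another context's event loop.
 */
static void coroutine_fn example_co_entry(void *opaque)
{
    /* executes in the AioContext chosen below */
}

static void example_start_in_ctx(AioContext *ctx, void *opaque)
{
    Coroutine *co = qemu_coroutine_create(example_co_entry, opaque);

    /* Safe from any thread; co must not be scheduled anywhere else */
    aio_co_schedule(ctx, co);
}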

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}
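
/*
 * A sketch of the locking discipline: callers outside the context's home
 * thread bracket AioContext operations with acquire/release.  The timer
 * shown is an illustrative assumption.
 */
static void example_locked_mod(AioContext *ctx, QEMUTimer *timer, int64_t ns)
{
    aio_context_acquire(ctx);
    timer_mod(timer, ns);   /* recursive mutex: nesting is allowed */
    aio_context_release(ctx);
}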