xref: /openbmc/qemu/util/async.c (revision a5c02408c1de0a0592d90f153328a4295b6fbca6)
1  /*
2   * Data plane event loop
3   *
4   * Copyright (c) 2003-2008 Fabrice Bellard
5   * Copyright (c) 2009-2017 QEMU contributors
6   *
7   * Permission is hereby granted, free of charge, to any person obtaining a copy
8   * of this software and associated documentation files (the "Software"), to deal
9   * in the Software without restriction, including without limitation the rights
10   * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11   * copies of the Software, and to permit persons to whom the Software is
12   * furnished to do so, subject to the following conditions:
13   *
14   * The above copyright notice and this permission notice shall be included in
15   * all copies or substantial portions of the Software.
16   *
17   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20   * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22   * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23   * THE SOFTWARE.
24   */
25  
26  #include "qemu/osdep.h"
27  #include "qapi/error.h"
28  #include "block/aio.h"
29  #include "block/thread-pool.h"
30  #include "block/graph-lock.h"
31  #include "qemu/main-loop.h"
32  #include "qemu/atomic.h"
33  #include "qemu/lockcnt.h"
34  #include "qemu/rcu_queue.h"
35  #include "block/raw-aio.h"
36  #include "qemu/coroutine_int.h"
37  #include "qemu/coroutine-tls.h"
38  #include "sysemu/cpu-timers.h"
39  #include "trace.h"
40  
41  /***********************************************************/
42  /* bottom halves (can be seen as timers which expire ASAP) */
43  
44  /* QEMUBH::flags values */
45  enum {
46      /* Already enqueued and waiting for aio_bh_poll() */
47      BH_PENDING   = (1 << 0),
48  
49      /* Invoke the callback */
50      BH_SCHEDULED = (1 << 1),
51  
52      /* Delete without invoking callback */
53      BH_DELETED   = (1 << 2),
54  
55      /* Delete after invoking callback */
56      BH_ONESHOT   = (1 << 3),
57  
58      /* Schedule periodically when the event loop is idle */
59      BH_IDLE      = (1 << 4),
60  };
61  
62  struct QEMUBH {
63      AioContext *ctx;
64      const char *name;
65      QEMUBHFunc *cb;
66      void *opaque;
67      QSLIST_ENTRY(QEMUBH) next;
68      unsigned flags;
69      MemReentrancyGuard *reentrancy_guard;
70  };
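
/*
 * Rough lifecycle sketch derived from the flags above (for orientation only,
 * not a new state machine):
 *
 *     aio_bh_new()              flags == 0
 *     qemu_bh_schedule()        flags |= BH_PENDING | BH_SCHEDULED, the BH is
 *                               pushed onto ctx->bh_list and ctx is notified
 *     aio_bh_poll() dequeue     BH_PENDING/BH_SCHEDULED/BH_IDLE are cleared;
 *                               the callback runs if BH_SCHEDULED was set and
 *                               BH_DELETED was not
 *     qemu_bh_delete()          flags |= BH_DELETED; the BH is freed by the
 *                               next aio_bh_poll() (or by aio_ctx_finalize())
 */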
71  
72  /* Called concurrently from any thread */
73  static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
74  {
75      AioContext *ctx = bh->ctx;
76      unsigned old_flags;
77  
78      /*
79       * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
80       * insertion starts after BH_PENDING is set.
81       */
82      old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
83  
84      if (!(old_flags & BH_PENDING)) {
85          /*
86           * At this point the bottom half becomes visible to aio_bh_poll().
87           * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
88           * aio_bh_poll(), ensuring that:
89           * 1. any writes needed by the callback are visible from the callback
90           *    after aio_bh_dequeue() returns bh.
91           * 2. ctx is loaded before the callback has a chance to execute and bh
92           *    could be freed.
93           */
94          QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
95      }
96  
97      aio_notify(ctx);
98      if (unlikely(icount_enabled())) {
99          /*
100           * Workaround for record/replay.
101           * vCPU execution should be suspended when a new BH is scheduled.
102           * This is needed to avoid guest timeouts caused
103           * by overly long uninterrupted execution.
104           */
105          icount_notify_exit();
106      }
107  }
108  
109  /* Only called from aio_bh_poll() and aio_ctx_finalize() */
110  static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
111  {
112      QEMUBH *bh = QSLIST_FIRST_RCU(head);
113  
114      if (!bh) {
115          return NULL;
116      }
117  
118      QSLIST_REMOVE_HEAD(head, next);
119  
120      /*
121       * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
122       * the removal finishes before BH_PENDING is reset.
123       */
124      *flags = qatomic_fetch_and(&bh->flags,
125                                ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
126      return bh;
127  }
128  
129  void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
130                                    void *opaque, const char *name)
131  {
132      QEMUBH *bh;
133      bh = g_new(QEMUBH, 1);
134      *bh = (QEMUBH){
135          .ctx = ctx,
136          .cb = cb,
137          .opaque = opaque,
138          .name = name,
139      };
140      aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
141  }
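
/*
 * Illustrative use (a sketch, not a call site in this file): callers normally
 * go through the aio_bh_schedule_oneshot() convenience macro from
 * block/aio.h, which supplies the name automatically.  my_completion_cb and
 * my_state are hypothetical caller-provided names.
 *
 *     aio_bh_schedule_oneshot(ctx, my_completion_cb, my_state);
 *
 * The bottom half runs once in ctx's home thread and is then freed by
 * aio_bh_poll(); the caller never sees the QEMUBH pointer.
 */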
142  
143  QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
144                          const char *name, MemReentrancyGuard *reentrancy_guard)
145  {
146      QEMUBH *bh;
147      bh = g_new(QEMUBH, 1);
148      *bh = (QEMUBH){
149          .ctx = ctx,
150          .cb = cb,
151          .opaque = opaque,
152          .name = name,
153          .reentrancy_guard = reentrancy_guard,
154      };
155      return bh;
156  }
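
/*
 * Illustrative use (a sketch; my_cb and my_state are hypothetical): long-lived
 * bottom halves are normally created through the aio_bh_new() macro from
 * block/aio.h (or aio_bh_new_guarded() for device code that passes a
 * MemReentrancyGuard).
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);     // my_cb(my_state) runs from ctx's event loop
 *     ...
 *     qemu_bh_delete(bh);       // actually freed later by aio_bh_poll()
 */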
157  
158  void aio_bh_call(QEMUBH *bh)
159  {
160      bool last_engaged_in_io = false;
161  
162      /* Make a copy of the guard-pointer as cb may free the bh */
163      MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
164      if (reentrancy_guard) {
165          last_engaged_in_io = reentrancy_guard->engaged_in_io;
166          if (reentrancy_guard->engaged_in_io) {
167              trace_reentrant_aio(bh->ctx, bh->name);
168          }
169          reentrancy_guard->engaged_in_io = true;
170      }
171  
172      bh->cb(bh->opaque);
173  
174      if (reentrancy_guard) {
175          reentrancy_guard->engaged_in_io = last_engaged_in_io;
176      }
177  }
178  
179  /* aio_bh_poll() must not be called concurrently from multiple threads. */
180  int aio_bh_poll(AioContext *ctx)
181  {
182      BHListSlice slice;
183      BHListSlice *s;
184      int ret = 0;
185  
186      /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue().  */
187      QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
188  
189      /*
190       * GCC13 [-Werror=dangling-pointer=] complains that the local variable
191       * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
192       * list is emptied before this function returns.
193       */
194  #if !defined(__clang__)
195  #pragma GCC diagnostic push
196  #pragma GCC diagnostic ignored "-Wpragmas"
197  #pragma GCC diagnostic ignored "-Wdangling-pointer="
198  #endif
199      QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
200  #if !defined(__clang__)
201  #pragma GCC diagnostic pop
202  #endif
203  
204      while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
205          QEMUBH *bh;
206          unsigned flags;
207  
208          bh = aio_bh_dequeue(&s->bh_list, &flags);
209          if (!bh) {
210              QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
211              continue;
212          }
213  
214          if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
215              /* Idle BHs don't count as progress */
216              if (!(flags & BH_IDLE)) {
217                  ret = 1;
218              }
219              aio_bh_call(bh);
220          }
221          if (flags & (BH_DELETED | BH_ONESHOT)) {
222              g_free(bh);
223          }
224      }
225  
226      return ret;
227  }
228  
229  void qemu_bh_schedule_idle(QEMUBH *bh)
230  {
231      aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
232  }
233  
234  void qemu_bh_schedule(QEMUBH *bh)
235  {
236      aio_bh_enqueue(bh, BH_SCHEDULED);
237  }
238  
239  /* This function is asynchronous: a bottom half that aio_bh_poll() has
240   * already dequeued may still invoke its callback once. */
241  void qemu_bh_cancel(QEMUBH *bh)
242  {
243      qatomic_and(&bh->flags, ~BH_SCHEDULED);
244  }
245  
246  /* This function is asynchronous: the bottom half is not freed immediately,
247   * but only once aio_bh_poll() (or aio_ctx_finalize()) dequeues it.
248   */
249  void qemu_bh_delete(QEMUBH *bh)
250  {
251      aio_bh_enqueue(bh, BH_DELETED);
252  }
253  
254  static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
255  {
256      QEMUBH *bh;
257  
258      QSLIST_FOREACH_RCU(bh, head, next) {
259          if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
260              if (bh->flags & BH_IDLE) {
261                  /* idle bottom halves will be polled at least
262                   * every 10ms */
263                  timeout = 10000000;
264              } else {
265                  /* non-idle bottom halves will be executed
266                   * immediately */
267                  return 0;
268              }
269          }
270      }
271  
272      return timeout;
273  }
274  
275  int64_t
276  aio_compute_timeout(AioContext *ctx)
277  {
278      BHListSlice *s;
279      int64_t deadline;
280      int timeout = -1;
281  
282      timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
283      if (timeout == 0) {
284          return 0;
285      }
286  
287      QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
288          timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
289          if (timeout == 0) {
290              return 0;
291          }
292      }
293  
294      deadline = timerlistgroup_deadline_ns(&ctx->tlg);
295      if (deadline == 0) {
296          return 0;
297      } else {
298          return qemu_soonest_timeout(timeout, deadline);
299      }
300  }
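
/*
 * Worked example of the precedence above (a sketch): with at least one
 * scheduled non-idle BH the result is 0 (dispatch immediately); with only
 * idle BHs pending it is capped at 10 ms; otherwise it is the earliest timer
 * deadline from ctx->tlg, or -1 (block indefinitely) when no timer is armed.
 * The value is in nanoseconds; aio_ctx_prepare() converts it to milliseconds
 * with qemu_timeout_ns_to_ms().
 */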
301  
302  static gboolean
303  aio_ctx_prepare(GSource *source, gint    *timeout)
304  {
305      AioContext *ctx = (AioContext *) source;
306  
307      qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);
308  
309      /*
310       * Write ctx->notify_me before computing the timeout
311       * (reading bottom half flags, etc.).  Pairs with
312       * smp_mb in aio_notify().
313       */
314      smp_mb();
315  
316      /* We assume there is no timeout already supplied */
317      *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
318  
319      if (aio_prepare(ctx)) {
320          *timeout = 0;
321      }
322  
323      return *timeout == 0;
324  }
325  
326  static gboolean
327  aio_ctx_check(GSource *source)
328  {
329      AioContext *ctx = (AioContext *) source;
330      QEMUBH *bh;
331      BHListSlice *s;
332  
333      /* Finish computing the timeout before clearing the flag.  */
334      qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
335      aio_notify_accept(ctx);
336  
337      QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
338          if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
339              return true;
340          }
341      }
342  
343      QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
344          QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
345              if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
346                  return true;
347              }
348          }
349      }
350      return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
351  }
352  
353  static gboolean
354  aio_ctx_dispatch(GSource     *source,
355                   GSourceFunc  callback,
356                   gpointer     user_data)
357  {
358      AioContext *ctx = (AioContext *) source;
359  
360      assert(callback == NULL);
361      aio_dispatch(ctx);
362      return true;
363  }
364  
365  static void
366  aio_ctx_finalize(GSource     *source)
367  {
368      AioContext *ctx = (AioContext *) source;
369      QEMUBH *bh;
370      unsigned flags;
371  
372      thread_pool_free(ctx->thread_pool);
373  
374  #ifdef CONFIG_LINUX_AIO
375      if (ctx->linux_aio) {
376          laio_detach_aio_context(ctx->linux_aio, ctx);
377          laio_cleanup(ctx->linux_aio);
378          ctx->linux_aio = NULL;
379      }
380  #endif
381  
382  #ifdef CONFIG_LINUX_IO_URING
383      if (ctx->linux_io_uring) {
384          luring_detach_aio_context(ctx->linux_io_uring, ctx);
385          luring_cleanup(ctx->linux_io_uring);
386          ctx->linux_io_uring = NULL;
387      }
388  #endif
389  
390      assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
391      qemu_bh_delete(ctx->co_schedule_bh);
392  
393      /* There must be no aio_bh_poll() calls going on */
394      assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));
395  
396      while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
397          /*
398           * qemu_bh_delete() must have been called on BHs in this AioContext. In
399           * many cases memory leaks, hangs, or inconsistent state occur when a
400           * BH is leaked because something still expects it to run.
401           *
402           * If you hit this, fix the lifecycle of the BH so that
403           * qemu_bh_delete() and any associated cleanup is called before the
404           * AioContext is finalized.
405           */
406          if (unlikely(!(flags & BH_DELETED))) {
407              fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
408                      __func__, bh->name);
409              abort();
410          }
411  
412          g_free(bh);
413      }
414  
415      aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
416      event_notifier_cleanup(&ctx->notifier);
417      qemu_rec_mutex_destroy(&ctx->lock);
418      qemu_lockcnt_destroy(&ctx->list_lock);
419      timerlistgroup_deinit(&ctx->tlg);
420      unregister_aiocontext(ctx);
421      aio_context_destroy(ctx);
422  }
423  
424  static GSourceFuncs aio_source_funcs = {
425      aio_ctx_prepare,
426      aio_ctx_check,
427      aio_ctx_dispatch,
428      aio_ctx_finalize
429  };
430  
431  GSource *aio_get_g_source(AioContext *ctx)
432  {
433      aio_context_use_g_source(ctx);
434      g_source_ref(&ctx->source);
435      return &ctx->source;
436  }
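
/*
 * Illustrative use (a sketch): an AioContext can be driven by a plain GLib
 * main loop by attaching its GSource:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the GMainContext now holds its own reference
 *
 * aio_ctx_prepare()/aio_ctx_check()/aio_ctx_dispatch() above are then invoked
 * by the GLib main loop like for any other event source.
 */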
437  
438  ThreadPool *aio_get_thread_pool(AioContext *ctx)
439  {
440      if (!ctx->thread_pool) {
441          ctx->thread_pool = thread_pool_new(ctx);
442      }
443      return ctx->thread_pool;
444  }
445  
446  #ifdef CONFIG_LINUX_AIO
447  LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
448  {
449      if (!ctx->linux_aio) {
450          ctx->linux_aio = laio_init(errp);
451          if (ctx->linux_aio) {
452              laio_attach_aio_context(ctx->linux_aio, ctx);
453          }
454      }
455      return ctx->linux_aio;
456  }
457  
458  LinuxAioState *aio_get_linux_aio(AioContext *ctx)
459  {
460      assert(ctx->linux_aio);
461      return ctx->linux_aio;
462  }
463  #endif
464  
465  #ifdef CONFIG_LINUX_IO_URING
466  LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
467  {
468      if (ctx->linux_io_uring) {
469          return ctx->linux_io_uring;
470      }
471  
472      ctx->linux_io_uring = luring_init(errp);
473      if (!ctx->linux_io_uring) {
474          return NULL;
475      }
476  
477      luring_attach_aio_context(ctx->linux_io_uring, ctx);
478      return ctx->linux_io_uring;
479  }
480  
481  LuringState *aio_get_linux_io_uring(AioContext *ctx)
482  {
483      assert(ctx->linux_io_uring);
484      return ctx->linux_io_uring;
485  }
486  #endif
487  
488  void aio_notify(AioContext *ctx)
489  {
490      /*
491       * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
492       * smp_mb() in aio_notify_accept().
493       */
494      smp_wmb();
495      qatomic_set(&ctx->notified, true);
496  
497      /*
498       * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
499       * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
500       */
501      smp_mb();
502      if (qatomic_read(&ctx->notify_me)) {
503          event_notifier_set(&ctx->notifier);
504      }
505  }
506  
507  void aio_notify_accept(AioContext *ctx)
508  {
509      qatomic_set(&ctx->notified, false);
510  
511      /*
512       * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
513       * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
514       * with smp_wmb() in aio_notify.
515       */
516      smp_mb();
517  }
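
/*
 * Summary of the handshake between aio_notify() and the event loop (a sketch
 * of the code above, not a new protocol):
 *
 *     producer (any thread)               consumer (event loop thread)
 *     ---------------------               ----------------------------
 *     enqueue work (BH, handler, ...)     set notify_me before polling
 *     smp_wmb(); notified = true          smp_mb(); compute timeout and poll
 *     smp_mb(); if (notify_me)            clear notify_me; aio_notify_accept()
 *         event_notifier_set()            smp_mb(); process the work
 *
 * The event notifier is only written while the consumer may be blocked in
 * poll, which keeps the common aio_notify() path free of syscalls.
 */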
518  
519  static void aio_timerlist_notify(void *opaque, QEMUClockType type)
520  {
521      aio_notify(opaque);
522  }
523  
524  static void aio_context_notifier_cb(EventNotifier *e)
525  {
526      AioContext *ctx = container_of(e, AioContext, notifier);
527  
528      event_notifier_test_and_clear(&ctx->notifier);
529  }
530  
531  /* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
532  static bool aio_context_notifier_poll(void *opaque)
533  {
534      EventNotifier *e = opaque;
535      AioContext *ctx = container_of(e, AioContext, notifier);
536  
537      /*
538       * No need for load-acquire because we just want to kick the
539       * event loop.  aio_notify_accept() takes care of synchronizing
540       * the event loop with the producers.
541       */
542      return qatomic_read(&ctx->notified);
543  }
544  
545  static void aio_context_notifier_poll_ready(EventNotifier *e)
546  {
547      /* Do nothing, we just wanted to kick the event loop */
548  }
549  
550  static void co_schedule_bh_cb(void *opaque)
551  {
552      AioContext *ctx = opaque;
553      QSLIST_HEAD(, Coroutine) straight, reversed;
554  
555      QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
556      QSLIST_INIT(&straight);
557  
558      while (!QSLIST_EMPTY(&reversed)) {
559          Coroutine *co = QSLIST_FIRST(&reversed);
560          QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
561          QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
562      }
563  
564      while (!QSLIST_EMPTY(&straight)) {
565          Coroutine *co = QSLIST_FIRST(&straight);
566          QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
567          trace_aio_co_schedule_bh_cb(ctx, co);
568  
569          /* Protected by write barrier in qemu_aio_coroutine_enter */
570          qatomic_set(&co->scheduled, NULL);
571          qemu_aio_coroutine_enter(ctx, co);
572      }
573  }
574  
575  AioContext *aio_context_new(Error **errp)
576  {
577      int ret;
578      AioContext *ctx;
579  
580      ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
581      QSLIST_INIT(&ctx->bh_list);
582      QSIMPLEQ_INIT(&ctx->bh_slice_list);
583      aio_context_setup(ctx);
584  
585      ret = event_notifier_init(&ctx->notifier, false);
586      if (ret < 0) {
587          error_setg_errno(errp, -ret, "Failed to initialize event notifier");
588          goto fail;
589      }
590      g_source_set_can_recurse(&ctx->source, true);
591      qemu_lockcnt_init(&ctx->list_lock);
592  
593      ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
594      QSLIST_INIT(&ctx->scheduled_coroutines);
595  
596      aio_set_event_notifier(ctx, &ctx->notifier,
597                             aio_context_notifier_cb,
598                             aio_context_notifier_poll,
599                             aio_context_notifier_poll_ready);
600  #ifdef CONFIG_LINUX_AIO
601      ctx->linux_aio = NULL;
602  #endif
603  
604  #ifdef CONFIG_LINUX_IO_URING
605      ctx->linux_io_uring = NULL;
606  #endif
607  
608      ctx->thread_pool = NULL;
609      qemu_rec_mutex_init(&ctx->lock);
610      timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
611  
612      ctx->poll_ns = 0;
613      ctx->poll_max_ns = 0;
614      ctx->poll_grow = 0;
615      ctx->poll_shrink = 0;
616  
617      ctx->aio_max_batch = 0;
618  
619      ctx->thread_pool_min = 0;
620      ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
621  
622      register_aiocontext(ctx);
623  
624      return ctx;
625  fail:
626      g_source_destroy(&ctx->source);
627      return NULL;
628  }
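
/*
 * Illustrative use (a sketch loosely modelled on an iothread; the 'running'
 * flag and the surrounding loop are hypothetical):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     qemu_set_current_aio_context(ctx);
 *     while (running) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_unref(ctx);   // drops the GSource reference; once all
 *                               // references are gone aio_ctx_finalize() runs
 */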
629  
630  void aio_co_schedule(AioContext *ctx, Coroutine *co)
631  {
632      trace_aio_co_schedule(ctx, co);
633      const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
634                                             __func__);
635  
636      if (scheduled) {
637          fprintf(stderr,
638                  "%s: Co-routine was already scheduled in '%s'\n",
639                  __func__, scheduled);
640          abort();
641      }
642  
643      /* The coroutine might run and release the last ctx reference before we
644       * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
645       * we're done.
646       */
647      aio_context_ref(ctx);
648  
649      QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
650                                co, co_scheduled_next);
651      qemu_bh_schedule(ctx->co_schedule_bh);
652  
653      aio_context_unref(ctx);
654  }
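
/*
 * Illustrative use (a sketch; my_co_fn, my_state and target_ctx are
 * hypothetical): this is the usual way to hand a coroutine to another
 * AioContext's thread.
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_state);
 *     aio_co_schedule(target_ctx, co);   // entered later from target_ctx
 */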
655  
656  typedef struct AioCoRescheduleSelf {
657      Coroutine *co;
658      AioContext *new_ctx;
659  } AioCoRescheduleSelf;
660  
661  static void aio_co_reschedule_self_bh(void *opaque)
662  {
663      AioCoRescheduleSelf *data = opaque;
664      aio_co_schedule(data->new_ctx, data->co);
665  }
666  
667  void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
668  {
669      AioContext *old_ctx = qemu_get_current_aio_context();
670  
671      if (old_ctx != new_ctx) {
672          AioCoRescheduleSelf data = {
673              .co = qemu_coroutine_self(),
674              .new_ctx = new_ctx,
675          };
676          /*
677           * We can't directly schedule the coroutine in the target context
678           * because this would be racy: The other thread could try to enter the
679           * coroutine before it has yielded in this one.
680           */
681          aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
682          qemu_coroutine_yield();
683      }
684  }
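
/*
 * Illustrative use (a sketch; the function and context names are
 * hypothetical):
 *
 *     void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         ...
 *         aio_co_reschedule_self(other_ctx);
 *         // now running in other_ctx's thread
 *         ...
 *         aio_co_reschedule_self(original_ctx);
 *     }
 */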
685  
686  void aio_co_wake(Coroutine *co)
687  {
688      AioContext *ctx;
689  
690      /* Read coroutine before co->ctx.  Matches smp_wmb in
691       * qemu_coroutine_enter.
692       */
693      smp_read_barrier_depends();
694      ctx = qatomic_read(&co->ctx);
695  
696      aio_co_enter(ctx, co);
697  }
698  
699  void aio_co_enter(AioContext *ctx, Coroutine *co)
700  {
701      if (ctx != qemu_get_current_aio_context()) {
702          aio_co_schedule(ctx, co);
703          return;
704      }
705  
706      if (qemu_in_coroutine()) {
707          Coroutine *self = qemu_coroutine_self();
708          assert(self != co);
709          QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
710      } else {
711          qemu_aio_coroutine_enter(ctx, co);
712      }
713  }
714  
715  void aio_context_ref(AioContext *ctx)
716  {
717      g_source_ref(&ctx->source);
718  }
719  
720  void aio_context_unref(AioContext *ctx)
721  {
722      g_source_unref(&ctx->source);
723  }
724  
725  QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)
726  
727  AioContext *qemu_get_current_aio_context(void)
728  {
729      AioContext *ctx = get_my_aiocontext();
730      if (ctx) {
731          return ctx;
732      }
733      if (bql_locked()) {
734          /* Possibly in a vCPU thread.  */
735          return qemu_get_aio_context();
736      }
737      return NULL;
738  }
739  
740  void qemu_set_current_aio_context(AioContext *ctx)
741  {
742      assert(!get_my_aiocontext());
743      set_my_aiocontext(ctx);
744  }
745  
746  void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
747                                          int64_t max, Error **errp)
748  {
749  
750      if (min > max || max <= 0 || min < 0 || min > INT_MAX || max > INT_MAX) {
751          error_setg(errp, "bad thread-pool-min/thread-pool-max values");
752          return;
753      }
754  
755      ctx->thread_pool_min = min;
756      ctx->thread_pool_max = max;
757  
758      if (ctx->thread_pool) {
759          thread_pool_update_params(ctx->thread_pool, ctx);
760      }
761  }
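
/*
 * Illustrative use (a sketch; the bounds are arbitrary and error_report_err()
 * from qemu/error-report.h is assumed on the caller's side):
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 0, 64, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */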
762