xref: /openbmc/qemu/util/aio-posix.c (revision 2e1cacfb)
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/lockcnt.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "aio-posix.h"

/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

bool aio_poll_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->poll_disable_cnt);
}

void aio_add_ready_handler(AioHandlerList *ready_list,
                           AioHandler *node,
                           int revents)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->pfd.revents = revents;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static void aio_add_poll_ready_handler(AioHandlerList *ready_list,
                                       AioHandler *node)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->poll_ready = true;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd) {
            if (!QLIST_IS_INSERTED(node, node_deleted)) {
                return node;
            }
        }
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    node->pfd.revents = 0;
    node->poll_ready = false;

    /* If the fd monitor has already marked it deleted, leave it alone */
    if (QLIST_IS_INSERTED(node, node_deleted)) {
        return false;
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_SAFE_REMOVE(node, node_poll);
    QLIST_REMOVE(node, node);
    return true;
}

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    if (io_poll && !io_poll_ready) {
        io_poll = NULL; /* polling only makes sense if there is a handler */
    }

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->io_poll_ready = io_poll_ready;
        new_node->opaque = opaque;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    qatomic_set(&ctx->poll_disable_cnt,
               qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    ctx->fdmon_ops->update(ctx, node, new_node);
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
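
/*
 * Illustrative sketch (not part of this file): a typical caller registers a
 * read handler and later removes it by passing NULL callbacks.  The names
 * "my_fd", "my_read_cb" and "my_state" are hypothetical.
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         struct MyState *s = opaque;   // hypothetical per-fd state
 *         // ... drain the fd and schedule follow-up work ...
 *     }
 *
 *     // register: io_read only, no write/poll handlers
 *     aio_set_fd_handler(ctx, my_fd, my_read_cb, NULL, NULL, NULL, my_state);
 *
 *     // unregister: passing all-NULL callbacks deletes the handler
 *     aio_set_fd_handler(ctx, my_fd, NULL, NULL, NULL, NULL, NULL);
 */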

static void aio_set_fd_poll(AioContext *ctx, int fd,
                            IOHandler *io_poll_begin,
                            IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL, io_poll,
                       (IOHandler *)io_poll_ready, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}
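
/*
 * Illustrative sketch (an assumption, not taken from this file): event
 * notifiers are the usual way an AioContext is kicked.  A caller might wire
 * one up as follows; "my_notifier_cb", "my_poll_cb", "my_poll_ready_cb",
 * "my_poll_begin" and "my_poll_end" are hypothetical.
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, my_notifier_cb, my_poll_cb,
 *                            my_poll_ready_cb);
 *
 *     // optional: install begin/end hooks used while busy polling
 *     aio_set_event_notifier_poll(ctx, &e, my_poll_begin, my_poll_end);
 *
 *     // teardown: NULL handlers unregister the notifier's fd
 *     aio_set_event_notifier(ctx, &e, NULL, NULL, NULL);
 */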

static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
                             bool started)
{
    AioHandler *node;
    bool progress = false;

    if (started == ctx->poll_started) {
        return false;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
        IOHandler *fn;

        if (QLIST_IS_INSERTED(node, node_deleted)) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }

        /* Poll one last time in case ->io_poll_end() raced with the event */
        if (!started && node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);
            progress = true;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return progress;
}


bool aio_prepare(AioContext *ctx)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);

    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, &ready_list, false);
    /* TODO what to do with this list? */

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        /* TODO should this check poll ready? */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

static void aio_free_deleted_handlers(AioContext *ctx)
{
    AioHandler *node;

    if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
        return;
    }
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return; /* we are nested, let the parent do the freeing */
    }

    while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
        QLIST_REMOVE(node, node);
        QLIST_REMOVE(node, node_deleted);
        QLIST_SAFE_REMOVE(node, node_poll);
        g_free(node);
    }

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}

static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
    bool progress = false;
    bool poll_ready;
    int revents;

    revents = node->pfd.revents & node->pfd.events;
    node->pfd.revents = 0;

    poll_ready = node->poll_ready;
    node->poll_ready = false;

    /*
     * Start polling AioHandlers when they become ready because activity is
     * likely to continue.  Note that starvation is theoretically possible when
     * fdmon_supports_polling(), but only until the fd fires for the first
     * time.
     */
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        !QLIST_IS_INSERTED(node, node_poll) &&
        node->io_poll) {
        trace_poll_add(ctx, node, node->pfd.fd, revents);
        if (ctx->poll_started && node->io_poll_begin) {
            node->io_poll_begin(node->opaque);
        }
        QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        poll_ready && revents == 0 && node->io_poll_ready) {
        /*
         * Remove temporarily to avoid infinite loops when ->io_poll_ready()
         * calls aio_poll() before clearing the condition that made the poll
         * handler become ready.
         */
        QLIST_SAFE_REMOVE(node, node_poll);

        node->io_poll_ready(node->opaque);

        if (!QLIST_IS_INSERTED(node, node_poll)) {
            QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
        }

        /*
         * Return early since revents was zero. aio_notify() does not count as
         * progress.
         */
        return node->opaque != &ctx->notifier;
    }

    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
        node->io_read) {
        node->io_read(node->opaque);

        /* aio_notify() does not count as progress */
        if (node->opaque != &ctx->notifier) {
            progress = true;
        }
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_OUT | G_IO_ERR)) &&
        node->io_write) {
        node->io_write(node->opaque);
        progress = true;
    }

    return progress;
}

/*
 * If we have a list of ready handlers then this is more efficient than
 * scanning all handlers with aio_dispatch_handlers().
 */
static bool aio_dispatch_ready_handlers(AioContext *ctx,
                                        AioHandlerList *ready_list)
{
    bool progress = false;
    AioHandler *node;

    while ((node = QLIST_FIRST(ready_list))) {
        QLIST_REMOVE(node, node_ready);
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

/* Slower than aio_dispatch_ready_handlers() but only used via glib */
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    aio_free_deleted_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

static bool run_poll_handlers_once(AioContext *ctx,
                                   AioHandlerList *ready_list,
                                   int64_t now,
                                   int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;
    AioHandler *tmp;

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);

            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

static bool fdmon_supports_polling(AioContext *ctx)
{
    return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}

static bool remove_idle_poll_handlers(AioContext *ctx,
                                      AioHandlerList *ready_list,
                                      int64_t now)
{
    AioHandler *node;
    AioHandler *tmp;
    bool progress = false;

    /*
     * File descriptor monitoring implementations without userspace polling
     * support suffer from starvation when a subset of handlers is polled
     * because fds will not be processed in a timely fashion.  Don't remove
     * idle poll handlers.
     */
    if (!fdmon_supports_polling(ctx)) {
        return false;
    }

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->poll_idle_timeout == 0LL) {
            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
        } else if (now >= node->poll_idle_timeout) {
            trace_poll_remove(ctx, node, node->pfd.fd);
            node->poll_idle_timeout = 0LL;
            QLIST_SAFE_REMOVE(node, node_poll);
            if (ctx->poll_started && node->io_poll_end) {
                node->io_poll_end(node->opaque);

                /*
                 * Final poll in case ->io_poll_end() races with an event.
                 * Never mind re-adding the handler in the rare case where
                 * this causes progress.
                 */
                if (node->io_poll(node->opaque)) {
                    aio_add_poll_ready_handler(ready_list, node);
                    progress = true;
                }
            }
        }
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @ready_list: the list to place ready handlers on
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
                              int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    /*
     * Optimization: ->io_poll() handlers often contain RCU read critical
     * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
     * -> rcu_read_lock() -> ... sequences with expensive memory
     * synchronization primitives.  Make the entire polling loop an RCU
     * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
     * are cheap.
     */
    RCU_READ_LOCK_GUARD();

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, ready_list,
                                          start_time, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

    if (remove_idle_poll_handlers(ctx, ready_list,
                                  start_time + elapsed_time)) {
        *timeout = 0;
        progress = true;
    }

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @ready_list: list to add handlers that need to be run
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *    polling succeeds.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
                          int64_t *timeout)
{
    int64_t max_ns;

    if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
        return false;
    }

    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
    if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
        /*
         * Enable poll mode. It pairs with the poll_set_started() in
         * aio_poll() which disables poll mode.
         */
        poll_set_started(ctx, ready_list, true);

        if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
            return true;
        }
    }
    return false;
}
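
/*
 * Note on timeout values (semantics assumed from qemu_soonest_timeout() in
 * the timer API, not defined in this file): -1 means "block forever" and 0
 * means "don't block", so qemu_soonest_timeout(-1, ctx->poll_ns) above simply
 * yields poll_ns, while a pending 1 ms timer caps the polling window at
 * MIN(1000000, ctx->poll_ns) nanoseconds.
 */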

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
    bool progress;
    bool use_notify_me;
    int64_t timeout;
    int64_t start = 0;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &ready_list, &timeout);
    assert(!(timeout && progress));

    /*
     * aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    use_notify_me = timeout != 0;
    if (use_notify_me) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before reading ctx->notified.  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();

        /* Don't block if aio_notify() was called */
        if (qatomic_read(&ctx->notified)) {
            timeout = 0;
        }
    }

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
        /*
         * Disable poll mode.  Poll mode must be disabled before calling
         * ctx->fdmon_ops->wait() so that a guest notification can wake up
         * the I/O thread when work becomes pending; otherwise we risk hangs
         * or unnecessary latency.
         */
        if (poll_set_started(ctx, &ready_list, false)) {
            timeout = 0;
            progress = true;
        }

        ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
    }

    if (use_notify_me) {
        /* Finish the poll before clearing the flag.  */
        qatomic_store_release(&ctx->notify_me,
                             qatomic_read(&ctx->notify_me) - 2);
    }

    aio_notify_accept(ctx);

    /* Adjust polling time */
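    /*
     * Worked example (numbers are only illustrative): with poll_max_ns=32000,
     * poll_grow=2 and poll_shrink=0, poll_ns starts at 0.  An iteration that
     * takes 10000 ns (longer than poll_ns, shorter than poll_max_ns) bumps
     * poll_ns to 4000; as long as iterations keep exceeding the current
     * poll_ns it doubles to 8000 and then 16000, at which point a 10000 ns
     * iteration falls into the "sweet spot" and poll_ns stops changing.  An
     * iteration longer than poll_max_ns resets poll_ns to 0, since
     * poll_shrink == 0 means "stop polling" rather than "divide".
     */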
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    progress |= aio_bh_poll(ctx);
    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

    aio_free_deleted_handlers(ctx);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}
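
/*
 * Illustrative sketch (an assumption, not part of this file): an
 * IOThread-style event loop drives its AioContext by calling aio_poll()
 * from the context's home thread, roughly:
 *
 *     while (!qatomic_read(&stopping)) {   // "stopping" is hypothetical
 *         aio_poll(ctx, true);             // block until something is ready
 *     }
 *
 * Callers that only want to make progress without blocking, e.g. while
 * spinning on a completion flag, use aio_poll(ctx, false) instead.
 */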

void aio_context_setup(AioContext *ctx)
{
    ctx->fdmon_ops = &fdmon_poll_ops;
    ctx->epollfd = -1;

    /* Use the fastest fd monitoring implementation if available */
    if (fdmon_io_uring_setup(ctx)) {
        return;
    }

    fdmon_epoll_setup(ctx);
}

void aio_context_destroy(AioContext *ctx)
{
    fdmon_io_uring_destroy(ctx);
    fdmon_epoll_disable(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_use_g_source(AioContext *ctx)
{
    /*
     * Disable io_uring when the glib main loop is used because it doesn't
     * support mixed glib/aio_poll() usage. It relies on aio_poll() being
     * called regularly so that changes to the monitored file descriptors are
     * submitted, otherwise a list of pending fd handlers builds up.
     */
    fdmon_io_uring_destroy(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}
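
/*
 * Illustrative note (the command line below is an assumption, not derived
 * from this file): these values typically come from the poll-max-ns,
 * poll-grow and poll-shrink properties of an IOThread object, e.g.
 *
 *     -object iothread,id=io1,poll-max-ns=32000,poll-grow=2,poll-shrink=0
 *
 * which would reach this function as
 * aio_context_set_poll_params(ctx, 32000, 2, 0, errp).
 */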

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
{
    /*
     * No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->aio_max_batch = max_batch;

    aio_notify(ctx);
}
791