/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioPollFn *io_poll;
    IOHandler *io_poll_begin;
    IOHandler *io_poll_end;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_enabled = false;
    if (!ctx->epoll_available) {
        return;
    }
    ctx->epoll_available = false;
    close(ctx->epollfd);
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

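/*
 * Register every active (non-deleted, non-empty event mask) handler with the
 * context's epoll instance.  If any EPOLL_CTL_ADD fails, the caller is
 * expected to fall back to ppoll() by calling aio_epoll_disable().
 */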
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

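/*
 * Propagate a handler change into the epoll set: add, modify or delete the
 * fd depending on whether the handler is new and whether it still has any
 * events.  On epoll_ctl() failure the context permanently falls back to
 * ppoll() via aio_epoll_disable().
 */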
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}

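/*
 * Wait for events via the context's epoll instance.  pfds contains a single
 * entry for the epoll fd itself; a positive (nanosecond) timeout is served
 * by blocking in qemu_poll_ns() on that fd first, so epoll_wait() then only
 * has to collect the ready handlers and translate their events back into
 * GLib's G_IO_* flags.
 */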
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

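/*
 * Decide whether the upcoming wait should go through aio_epoll() instead of
 * qemu_poll_ns().  epoll is enabled lazily, the first time the number of
 * polled fds reaches EPOLL_ENABLE_THRESHOLD; if enabling fails, epoll is
 * disabled for good and ppoll() keeps being used.
 */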
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_REMOVE(node, node);
    return true;
}

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clear the events in order to unregister the fd from the ctx epoll */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Allocate a replacement node; any old node is removed below */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->opaque = opaque;
        new_node->is_external = is_external;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    atomic_set(&ctx->poll_disable_cnt,
               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    if (new_node) {
        aio_epoll_update(ctx, new_node, is_new);
    } else if (node) {
        /* Unregister deleted fd_handler */
        aio_epoll_update(ctx, node, false);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
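
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * caller that watches a socket for readability on an AioContext and later
 * unregisters it by passing NULL callbacks:
 *
 *     static void my_sock_read(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ... read from s->sockfd ...
 *     }
 *
 *     aio_set_fd_handler(ctx, s->sockfd, true, my_sock_read, NULL, NULL, s);
 *     ...
 *     aio_set_fd_handler(ctx, s->sockfd, true, NULL, NULL, NULL, NULL);
 */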

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

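/*
 * Tell all handlers that userspace polling is starting or stopping by
 * invoking their io_poll_begin/io_poll_end callbacks (typically used to
 * suppress and re-enable fd notifications around a busy-polling phase).
 */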
static void poll_set_started(AioContext *ctx, bool started)
{
    AioHandler *node;

    if (started == ctx->poll_started) {
        return;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}


bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, so disable it. */
    poll_set_started(ctx, false);

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

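/*
 * Invoke the read/write callbacks of every handler whose revents match the
 * events it registered for, and reap nodes that were marked as deleted if
 * the list is not being walked concurrently.  Returns true if progress was
 * made (the context's own aio_notify() handler does not count).
 */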
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

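/*
 * Append one handler to the thread-local pollfds[]/nodes[] arrays, growing
 * both geometrically (8, 16, 32, ...) on demand and registering an atexit
 * notifier so the arrays are freed when the thread goes away.
 */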
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

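/*
 * Run every handler's io_poll() callback once.  A successful poll zeroes
 * *timeout so the caller skips the blocking wait; as in the dispatch path,
 * the context's own notifier does not count as progress.
 */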
static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 * @timeout: timeout for the subsequent blocking wait; zeroed on a successful
 *     poll and otherwise reduced by the elapsed polling time
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt));

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *    polling succeeds.
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
    int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);

    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
        poll_set_started(ctx, true);

        if (run_poll_handlers(ctx, max_ns, timeout)) {
            return true;
        }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx, timeout);
}

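/*
 * Run one iteration of the event loop for this AioContext: optionally
 * busy-poll first, then (if still necessary) block in ppoll()/epoll_wait(),
 * adapt the busy-poll window to how long the kernel wait actually took, and
 * finally dispatch bottom halves, fd handlers and timers.  Returns true if
 * any progress was made.
 */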
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;
    int64_t start = 0;

    assert(in_aio_context_home_thread(ctx));

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &timeout);
    assert(!(timeout && progress));

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
        assert(npfd == 0);

        /* fill pollfds */

        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        /* wait until next event */
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0;
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
    }

    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
        aio_notify_accept(ctx);
    }

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    /* if we have any ready fds, record their revents for dispatch below */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;

    progress |= aio_bh_poll(ctx);

    if (ret > 0) {
        progress |= aio_dispatch_handlers(ctx);
    }

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

void aio_context_setup(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s\n",
                strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}

void aio_context_destroy(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    aio_epoll_disable(ctx);
#endif
}

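/*
 * Configure adaptive polling for this context.  All values are in
 * nanoseconds: @max_ns caps the busy-poll window (0 disables polling), while
 * @grow and @shrink are the factors used in aio_poll() above to enlarge or
 * reduce ctx->poll_ns (a @grow of 0 defaults to doubling, a @shrink of 0
 * resets the window to 0).  For example, with max_ns=32000, grow=0 and
 * shrink=2, the window ramps 4000 -> 8000 -> 16000 -> 32000 ns as blocking
 * waits keep exceeding the current window, and is halved whenever a wait
 * overshoots max_ns.
 */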
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here; it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}