/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/lockcnt.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "aio-posix.h"

/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

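/*
 * Used as the ->need_wait() callback by fdmon implementations that lack a
 * kernel-side readiness mechanism: returns true while at least one handler
 * was registered without an io_poll() callback (tracked in
 * ctx->poll_disable_cnt), so aio_poll() must fall back to a blocking
 * system call.
 */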
bool aio_poll_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->poll_disable_cnt);
}

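/*
 * Queue @node at the head of @ready_list with the given @revents. If the
 * node is already on a nested caller's ready list it is moved rather than
 * duplicated.
 */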
void aio_add_ready_handler(AioHandlerList *ready_list,
                           AioHandler *node,
                           int revents)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->pfd.revents = revents;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

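/* Like aio_add_ready_handler(), but records an ->io_poll() success rather
 * than fd revents.
 */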
static void aio_add_poll_ready_handler(AioHandlerList *ready_list,
                                       AioHandler *node)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->poll_ready = true;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

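/* Return the live (not yet deleted) handler registered for @fd, or NULL */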
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd) {
            if (!QLIST_IS_INSERTED(node, node_deleted)) {
                return node;
            }
        }
    }

    return NULL;
}

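/*
 * Unregister @node. Returns true if the caller may free the node
 * immediately, false if freeing was deferred because a walk of the handler
 * list is still in progress.
 */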
static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    node->pfd.revents = 0;
    node->poll_ready = false;

    /* If the fd monitor has already marked it deleted, leave it alone */
    if (QLIST_IS_INSERTED(node, node_deleted)) {
        return false;
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_SAFE_REMOVE(node, node_poll);
    QLIST_REMOVE(node, node);
    return true;
}

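/*
 * Register or update the handlers for @fd; passing NULL for io_read,
 * io_write and io_poll unregisters it. An illustrative call (callback and
 * opaque names hypothetical):
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, NULL, NULL, my_state);
 *
 * Note that io_poll is ignored unless io_poll_ready is also provided,
 * because a successful poll needs a handler to dispatch.
 */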
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    if (io_poll && !io_poll_ready) {
        io_poll = NULL; /* polling only makes sense if there is a handler */
    }

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->io_poll_ready = io_poll_ready;
        new_node->opaque = opaque;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    qatomic_set(&ctx->poll_disable_cnt,
                qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    ctx->fdmon_ops->update(ctx, node, new_node);
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}

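/* Install optional callbacks that run when userspace polling starts and
 * stops for the handler registered on @fd.
 */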
static void aio_set_fd_poll(AioContext *ctx, int fd,
                            IOHandler *io_poll_begin,
                            IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL, io_poll,
                       (IOHandler *)io_poll_ready, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

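/*
 * Transition all polling handlers into (@started == true) or out of poll
 * mode, invoking their io_poll_begin()/io_poll_end() callbacks. When
 * leaving poll mode each handler is polled one final time; returns true if
 * that found pending work, with the ready handlers queued on @ready_list.
 */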
static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
                             bool started)
{
    AioHandler *node;
    bool progress = false;

    if (started == ctx->poll_started) {
        return false;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
        IOHandler *fn;

        if (QLIST_IS_INSERTED(node, node_deleted)) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }

        /* Poll one last time in case ->io_poll_end() raced with the event */
        if (!started && node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);
            progress = true;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return progress;
}


bool aio_prepare(AioContext *ctx)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);

    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, &ready_list, false);
    /* TODO what to do with this list? */

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        /* TODO should this check poll ready? */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

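/*
 * Free handlers whose deletion was deferred by aio_remove_fd_handler().
 * Only the outermost walker of the handler list performs the freeing;
 * nested callers leave it to their parent.
 */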
static void aio_free_deleted_handlers(AioContext *ctx)
{
    AioHandler *node;

    if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
        return;
    }
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return; /* we are nested, let the parent do the freeing */
    }

    while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
        QLIST_REMOVE(node, node);
        QLIST_REMOVE(node, node_deleted);
        QLIST_SAFE_REMOVE(node, node_poll);
        g_free(node);
    }

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}

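/*
 * Dispatch the callbacks pending on @node: io_poll_ready() for a poll-mode
 * hit, otherwise io_read()/io_write() according to revents. Returns true
 * if progress was made; activity on the context's own notifier does not
 * count.
 */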
static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
    bool progress = false;
    bool poll_ready;
    int revents;

    revents = node->pfd.revents & node->pfd.events;
    node->pfd.revents = 0;

    poll_ready = node->poll_ready;
    node->poll_ready = false;

    /*
     * Start polling AioHandlers when they become ready because activity is
     * likely to continue.  Note that starvation is theoretically possible when
     * fdmon_supports_polling(), but only until the fd fires for the first
     * time.
     */
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        !QLIST_IS_INSERTED(node, node_poll) &&
        node->io_poll) {
        trace_poll_add(ctx, node, node->pfd.fd, revents);
        if (ctx->poll_started && node->io_poll_begin) {
            node->io_poll_begin(node->opaque);
        }
        QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        poll_ready && revents == 0 && node->io_poll_ready) {
        /*
         * Remove temporarily to avoid infinite loops when ->io_poll_ready()
         * calls aio_poll() before clearing the condition that made the poll
         * handler become ready.
         */
        QLIST_SAFE_REMOVE(node, node_poll);

        node->io_poll_ready(node->opaque);

        if (!QLIST_IS_INSERTED(node, node_poll)) {
            QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
        }

        /*
         * Return early since revents was zero. aio_notify() does not count as
         * progress.
         */
        return node->opaque != &ctx->notifier;
    }

    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
        node->io_read) {
        node->io_read(node->opaque);

        /* aio_notify() does not count as progress */
        if (node->opaque != &ctx->notifier) {
            progress = true;
        }
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_OUT | G_IO_ERR)) &&
        node->io_write) {
        node->io_write(node->opaque);
        progress = true;
    }

    return progress;
}

/*
 * If we have a list of ready handlers then this is more efficient than
 * scanning all handlers with aio_dispatch_handlers().
 */
static bool aio_dispatch_ready_handlers(AioContext *ctx,
                                        AioHandlerList *ready_list)
{
    bool progress = false;
    AioHandler *node;

    while ((node = QLIST_FIRST(ready_list))) {
        QLIST_REMOVE(node, node_ready);
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

/* Slower than aio_dispatch_ready_handlers() but only used via glib */
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    aio_free_deleted_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

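/*
 * Call ->io_poll() once on every polling handler. Handlers that report
 * readiness are queued on @ready_list and *timeout is cleared so that the
 * caller skips the blocking system call.
 */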
static bool run_poll_handlers_once(AioContext *ctx,
                                   AioHandlerList *ready_list,
                                   int64_t now,
                                   int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;
    AioHandler *tmp;

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);

            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

static bool fdmon_supports_polling(AioContext *ctx)
{
    return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}

static bool remove_idle_poll_handlers(AioContext *ctx,
                                      AioHandlerList *ready_list,
                                      int64_t now)
{
    AioHandler *node;
    AioHandler *tmp;
    bool progress = false;

    /*
     * File descriptor monitoring implementations without userspace polling
     * support suffer from starvation when a subset of handlers is polled
     * because fds will not be processed in a timely fashion.  Don't remove
     * idle poll handlers.
     */
    if (!fdmon_supports_polling(ctx)) {
        return false;
    }

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->poll_idle_timeout == 0LL) {
            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
        } else if (now >= node->poll_idle_timeout) {
            trace_poll_remove(ctx, node, node->pfd.fd);
            node->poll_idle_timeout = 0LL;
            QLIST_SAFE_REMOVE(node, node_poll);
            if (ctx->poll_started && node->io_poll_end) {
                node->io_poll_end(node->opaque);

                /*
                 * Final poll in case ->io_poll_end() races with an event.
                 * Nevermind about re-adding the handler in the rare case where
                 * this causes progress.
                 */
                if (node->io_poll(node->opaque)) {
                    aio_add_poll_ready_handler(ready_list, node);
                    progress = true;
                }
            }
        }
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @ready_list: the list to place ready handlers on
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
                              int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    /*
     * Optimization: ->io_poll() handlers often contain RCU read critical
     * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
     * -> rcu_read_lock() -> ... sequences with expensive memory
     * synchronization primitives.  Make the entire polling loop an RCU
     * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
     * are cheap.
     */
    RCU_READ_LOCK_GUARD();

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, ready_list,
                                          start_time, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

    if (remove_idle_poll_handlers(ctx, ready_list,
                                  start_time + elapsed_time)) {
        *timeout = 0;
        progress = true;
    }

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @ready_list: list to add handlers that need to be run
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *    polling succeeds.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
                          int64_t *timeout)
{
    int64_t max_ns;

    if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
        return false;
    }

    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
    if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
        /*
         * Enable poll mode.  It pairs with the poll_set_started() in
         * aio_poll() which disables poll mode.
         */
        poll_set_started(ctx, ready_list, true);

        if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
            return true;
        }
    }
    return false;
}

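/*
 * Run one iteration of the event loop: poll in userspace when possible,
 * otherwise wait for fd activity via ctx->fdmon_ops->wait(), then dispatch
 * bottom halves, ready fd handlers and timers. When @blocking, the wait
 * uses the timeout computed by aio_compute_timeout(); otherwise it returns
 * without blocking. Returns true if progress was made.
 */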
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
    bool progress;
    bool use_notify_me;
    int64_t timeout;
    int64_t start = 0;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &ready_list, &timeout);
    assert(!(timeout && progress));

    /*
     * aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    use_notify_me = timeout != 0;
    if (use_notify_me) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before reading ctx->notified.  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();

        /* Don't block if aio_notify() was called */
        if (qatomic_read(&ctx->notified)) {
            timeout = 0;
        }
    }

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
        /*
         * Disable poll mode.  poll mode should be disabled before the call
         * of ctx->fdmon_ops->wait() so that guest's notification can wake
         * up IO threads when some work becomes pending.  It is essential to
         * avoid hangs or unnecessary latency.
         */
        if (poll_set_started(ctx, &ready_list, false)) {
            timeout = 0;
            progress = true;
        }

        ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
    }

    if (use_notify_me) {
        /* Finish the poll before clearing the flag.  */
        qatomic_store_release(&ctx->notify_me,
                              qatomic_read(&ctx->notify_me) - 2);
    }

    aio_notify_accept(ctx);

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    progress |= aio_bh_poll(ctx);
    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

    aio_free_deleted_handlers(ctx);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

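/*
 * Pick the fd monitoring implementation: io_uring when available, then
 * epoll, with poll(2) (fdmon_poll_ops, installed first as the default)
 * as the fallback.
 */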
void aio_context_setup(AioContext *ctx)
{
    ctx->fdmon_ops = &fdmon_poll_ops;
    ctx->epollfd = -1;

    /* Use the fastest fd monitoring implementation if available */
    if (fdmon_io_uring_setup(ctx)) {
        return;
    }

    fdmon_epoll_setup(ctx);
}

void aio_context_destroy(AioContext *ctx)
{
    fdmon_io_uring_destroy(ctx);
    fdmon_epoll_disable(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_use_g_source(AioContext *ctx)
{
    /*
     * Disable io_uring when the glib main loop is used because it doesn't
     * support mixed glib/aio_poll() usage.  It relies on aio_poll() being
     * called regularly so that changes to the monitored file descriptors are
     * submitted, otherwise a list of pending fd handlers builds up.
     */
    fdmon_io_uring_destroy(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
{
    /*
     * No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->aio_max_batch = max_batch;

    aio_notify(ctx);
}