/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif

/* One registered file descriptor in an AioContext.  Nodes live on
 * ctx->aio_handlers; readers walk the list under RCU, so removal is
 * deferred (via ->deleted) while any walker holds the list lockcnt.
 */
struct AioHandler
{
    GPollFD pfd;               /* fd plus requested/reported poll events */
    IOHandler *io_read;        /* invoked when the fd is readable */
    IOHandler *io_write;       /* invoked when the fd is writable */
    AioPollFn *io_poll;        /* userspace busy-poll callback, may be NULL */
    IOHandler *io_poll_begin;  /* hook called when busy polling starts */
    IOHandler *io_poll_end;    /* hook called when busy polling stops */
    int deleted;               /* marked dead while list walkers exist */
    void *opaque;              /* argument passed to all callbacks */
    bool is_external;          /* subject to aio_disable_external() gating */
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

/* Stop using epoll for this context; subsequent polls use ppoll only. */
static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}

/* Translate GLib poll event bits into their epoll equivalents. */
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

/* Register every live handler with the epoll instance.  Returns false on
 * the first EPOLL_CTL_ADD failure; the caller is expected to react by
 * calling aio_epoll_disable().
 */
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

/* Mirror a handler add/modify/delete into the epoll set.  An empty event
 * mask means the handler is being removed.  Any epoll_ctl() failure
 * disables epoll for this context and falls back to ppoll.
 */
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}

/* Wait for events via epoll.  @pfds holds the single entry for the epoll
 * fd itself (asserted below).  A positive timeout is consumed by
 * qemu_poll_ns() on the epoll fd first, so the subsequent epoll_wait()
 * only harvests events that are already pending.  Reported events are
 * stored into each handler's pfd.revents.  Returns epoll_wait()'s result
 * (or qemu_poll_ns()'s if nothing became ready).
 */
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

/* Decide whether this aio_poll() iteration should use epoll.  epoll is
 * switched on lazily once the number of polled fds reaches
 * EPOLL_ENABLE_THRESHOLD; if enabling fails, epoll is disabled for good.
 */
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

/* Stubs for hosts without epoll_create1(): ppoll(2) is always used and
 * aio_epoll() must never be reached (aio_epoll_check_poll() is false).
 */
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

/* Return the live (not yet deleted) handler registered for @fd, or NULL. */
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

/* Register, update, or remove the handler for @fd.  Passing NULL for all of
 * io_read/io_write/io_poll removes the handler.  @opaque is forwarded to
 * every callback.  Removal must cope with concurrent list walkers: if the
 * list lockcnt is held, the node is only marked deleted and reclaimed later
 * by aio_dispatch_handlers().
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (qemu_lockcnt_count(&ctx->list_lock)) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up while
             * no one is walking the handlers list.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }

        /* A handler without io_poll was counted in poll_disable_cnt;
         * drop that contribution now that it is gone.
         */
        if (!node->io_poll) {
            ctx->poll_disable_cnt--;
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;

            ctx->poll_disable_cnt += !io_poll;
        } else {
            /* Adjust by the difference: +1 if io_poll disappears,
             * -1 if it appears, 0 if unchanged.
             */
            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
        }

        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_poll = io_poll;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}

/* Install busy-poll begin/end hooks on @fd's handler; silently does
 * nothing if no handler is registered for @fd.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

/* Convenience wrapper: an EventNotifier is an fd with only a read handler. */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

/* Convenience wrapper around aio_set_fd_poll() for an EventNotifier. */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

/* Transition the context into or out of busy-poll mode, invoking each
 * live handler's io_poll_begin/io_poll_end hook.  No-op if the state
 * does not change.
 */
static void poll_set_started(AioContext *ctx, bool started)
{
    AioHandler *node;

    if (started == ctx->poll_started) {
        return;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}


/* GSource prepare callback: never reports readiness itself. */
bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, false);

    return false;
}

/* Return true if any handler has an event that was both requested and
 * reported, i.e. aio_dispatch() would invoke at least one callback.
 */
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

/* Invoke read/write callbacks for every handler with pending events and
 * reclaim nodes marked deleted (only when the list lock can be taken).
 * Caller must have incremented ctx->list_lock.  Returns true if any
 * callback other than the context's own notifier made progress.
 */
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        if (node->deleted) {
            /* Free deferred-deleted nodes, but only if no other walker
             * holds the lockcnt (dec_if_lock succeeds only then).
             */
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

/* Dispatch everything that is ready: bottom halves, fd handlers, timers. */
void aio_dispatch(AioContext *ctx)
{
    aio_bh_poll(ctx);

    qemu_lockcnt_inc(&ctx->list_lock);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

/* Thread-exit hook: release the per-thread poll arrays. */
static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

/* Append @node to the thread-local pollfds/nodes arrays, growing them
 * geometrically as needed (cleanup notifier is registered on first use).
 */
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

/* Run every handler's io_poll callback once; returns true if any of them
 * reported progress.  Deleted nodes are skipped but not freed here.
 */
static bool run_poll_handlers_once(AioContext *ctx)
{
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            progress = true;
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
{
    bool progress;
    int64_t end_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
    assert(ctx->poll_disable_cnt == 0);

    trace_run_poll_handlers_begin(ctx, max_ns);

    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;

    /* Busy-wait until a handler makes progress or the deadline passes. */
    do {
        progress = run_poll_handlers_once(ctx);
    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);

    trace_run_poll_handlers_end(ctx, progress);

    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @blocking: busy polling is only attempted when blocking is true
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, bool blocking)
{
    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
        /* See qemu_soonest_timeout() uint64_t hack */
        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                             (uint64_t)ctx->poll_ns);

        if (max_ns) {
            poll_set_started(ctx, true);

            if (run_poll_handlers(ctx, max_ns)) {
                return true;
            }
        }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx);
}

/* Main event loop iteration: busy-poll, then ppoll/epoll, then dispatch
 * bottom halves, fd handlers and timers.  @blocking selects whether the
 * OS-level wait may sleep.  Returns true if any callback made progress.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;
    int64_t start = 0;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    progress = try_poll_mode(ctx, blocking);
    if (!progress) {
        assert(npfd == 0);

        /* fill pollfds */

        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        timeout = blocking ? aio_compute_timeout(ctx) : 0;

        /* wait until next event */
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            /* With epoll, only the epoll fd itself is handed to poll. */
            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0;
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
    }

    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }

    /* Adjust polling time: shrink when we overslept, grow when there was
     * room left, aiming to keep block_ns within [poll_ns, poll_max_ns].
     */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= aio_bh_poll(ctx);

    if (ret > 0) {
        qemu_lockcnt_inc(&ctx->list_lock);
        progress |= aio_dispatch_handlers(ctx);
        qemu_lockcnt_dec(&ctx->list_lock);
    }

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

/* One-time per-context setup: create the epoll instance when available. */
void aio_context_setup(AioContext *ctx)
{
    /* TODO remove this in final patch submission */
    if (getenv("QEMU_AIO_POLL_MAX_NS")) {
        fprintf(stderr, "The QEMU_AIO_POLL_MAX_NS environment variable has "
                "been replaced with -object iothread,poll-max-ns=NUM\n");
        exit(1);
    }

#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}

/* Set the adaptive busy-poll parameters (poll_ns restarts from zero). */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}