/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * poll(2) file descriptor monitoring
 *
 * Uses ppoll(2) when available, g_poll() otherwise.
 */

#include "qemu/osdep.h"
#include "aio-posix.h"
#include "qemu/rcu_queue.h"

/*
 * These thread-local variables are used only in fdmon_poll_wait() around the
 * call to the poll() system call.  In particular they are not used while
 * aio_poll is performing callbacks, which makes it much easier to think about
 * reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

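/*
 * Thread-exit notifier that frees the thread-local pollfds[] and nodes[]
 * arrays.  It is registered by add_pollfd() the first time the arrays are
 * allocated in a given thread.
 */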
static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

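/*
 * Append node's fd and requested events to the thread-local pollfds[] array,
 * remembering the AioHandler at the same index in nodes[].  The arrays grow
 * by doubling and are freed by pollfds_cleanup() when the thread exits.
 */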
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

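/*
 * Wait for events with poll(2).  Builds pollfds[] from the AioContext's
 * handler list, optionally upgrades to epoll(7) when there are many fds,
 * then polls and moves handlers with pending revents onto ready_list.
 */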
static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
                            int64_t timeout)
{
    AioHandler *node;
    int ret;

    assert(npfd == 0);

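    /*
     * Collect fds to poll: skip handlers that are being deleted, have no
     * requested events, or are excluded by aio_node_check().
     */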
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
                && aio_node_check(ctx, node->is_external)) {
            add_pollfd(node);
        }
    }

    /* epoll(7) is faster above a certain number of fds */
    if (fdmon_epoll_try_upgrade(ctx, npfd)) {
        npfd = 0; /* we won't need pollfds[], reset npfd */
        return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
    }

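    /* qemu_poll_ns() takes its timeout in nanoseconds */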
    ret = qemu_poll_ns(pollfds, npfd, timeout);
    if (ret > 0) {
        int i;

        for (i = 0; i < npfd; i++) {
            int revents = pollfds[i].revents;

            if (revents) {
                aio_add_ready_handler(ready_list, nodes[i], revents);
            }
        }
    }

    npfd = 0;
    return ret;
}

static void fdmon_poll_update(AioContext *ctx,
                              AioHandler *old_node,
                              AioHandler *new_node)
{
    /* Do nothing, AioHandler already contains the state we'll need */
}

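/*
 * The poll(2)-based FDMonOps implementation.  No per-handler bookkeeping is
 * needed when fds are added or removed, so .update is a no-op and all the
 * work happens in .wait.
 */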
const FDMonOps fdmon_poll_ops = {
    .update = fdmon_poll_update,
    .wait = fdmon_poll_wait,
    .need_wait = aio_poll_disabled,
};