Lines Matching +full:wait +full:- +full:monitoring +full:- +full:ns

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Linux io_uring file descriptor monitoring
5 * The Linux io_uring API supports file descriptor monitoring with a few
16 * 3. File descriptor monitoring is O(1) like epoll(7) so it scales better than
26 * File descriptor monitoring is implemented using the following operations:
28 * 1. IORING_OP_POLL_ADD - adds a file descriptor to be monitored.
29 * 2. IORING_OP_POLL_REMOVE - removes a file descriptor being monitored. When
31 * re-added with the new poll mask, so this operation is also used as part
33 * 3. IORING_OP_TIMEOUT - added every time a blocking syscall is made to wait
34 * for events. This operation self-cancels if another event completes
42 * ctx->submit_list so that fdmon_io_uring_wait() can submit IORING_OP_POLL_ADD
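
The three operations above map directly onto liburing's prep helpers. The following is a minimal stand-alone sketch, not QEMU code: the queue depth, the stdin fd and the 1-second timeout are invented for illustration, and IORING_OP_POLL_REMOVE is left out because io_uring_prep_poll_remove()'s signature differs between liburing versions. The timeout is armed with count=1 so it completes early as soon as one other cqe is posted, mirroring the self-cancelling behaviour described in point 3.

    /* Hypothetical stand-alone sketch using liburing; not QEMU code */
    #include <liburing.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

        if (io_uring_queue_init(8, &ring, 0) < 0) {
            return 1;
        }

        /* 1. IORING_OP_POLL_ADD: monitor stdin for readability (one-shot) */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_poll_add(sqe, STDIN_FILENO, POLLIN);

        /* 3. IORING_OP_TIMEOUT, count=1: completes early if another cqe arrives */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_timeout(sqe, &ts, 1, 0);

        /* Submission and completion are batched into a single system call */
        io_uring_submit_and_wait(&ring, 1);

        while (io_uring_peek_cqe(&ring, &cqe) == 0) {
            printf("cqe res=%d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
    }

Running this prints cqes whose res field is either the ready poll events for stdin or -ETIME if the 1-second timeout expired first.
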
49 #include "aio-posix.h"
82 struct io_uring *ring = &ctx->fdmon_io_uring; in get_sqe()
93 } while (ret == -EINTR); in get_sqe()
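
The two lines above are from get_sqe(), which allocates an sqe and, when the sq ring is full, flushes the queued sqes with io_uring_submit() before retrying, restarting the system call on EINTR. A rough sketch of that pattern (the helper name is mine, not QEMU's):

    #include <errno.h>
    #include <liburing.h>

    /* Illustrative helper: allocate an sqe, flushing the sq ring if it is full */
    static struct io_uring_sqe *get_sqe_or_flush(struct io_uring *ring)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        int ret;

        if (sqe) {
            return sqe;
        }

        /* The sq ring is full: push the queued sqes to the kernel and retry */
        do {
            ret = io_uring_submit(ring);
        } while (ret == -EINTR);

        return io_uring_get_sqe(ring); /* may still be NULL if submission failed */
    }
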
106 old_flags = qatomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags); in enqueue()
130 *flags = qatomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING | in dequeue()
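
enqueue() and dequeue() implement the lock-free hand-off onto ctx->submit_list: qatomic_fetch_or() sets the PENDING bit together with the requested ADD/REMOVE work, and only the caller that observed PENDING clear actually links the node onto the list, so a node is never queued twice. A simplified sketch with C11 atomics; the bit values and the node type are stand-ins, and the real dequeue mask is truncated in the listing above:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Bit assignments invented for this sketch */
    enum {
        FDMON_IO_URING_PENDING = 1 << 0, /* node is (or is about to be) queued */
        FDMON_IO_URING_ADD     = 1 << 1, /* submit IORING_OP_POLL_ADD */
        FDMON_IO_URING_REMOVE  = 1 << 2, /* submit IORING_OP_POLL_REMOVE */
    };

    struct node {
        atomic_uint flags;
        struct node *next;               /* stand-in for the submit-list link */
    };

    /* Returns true if the caller must link the node onto the submit list */
    static bool mark_pending(struct node *n, unsigned op_flags)
    {
        unsigned old = atomic_fetch_or(&n->flags,
                                       FDMON_IO_URING_PENDING | op_flags);

        /* Only the thread that turned PENDING on performs the insertion */
        return !(old & FDMON_IO_URING_PENDING);
    }

    /* Taking the node off the list again: clear PENDING so it can be re-queued.
     * (The excerpt's dequeue() clears PENDING plus further work bits; the exact
     * mask is cut off in the listing.) */
    static unsigned mark_dequeued(struct node *n)
    {
        return atomic_fetch_and(&n->flags, ~FDMON_IO_URING_PENDING);
    }
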
140 enqueue(&ctx->submit_list, new_node, FDMON_IO_URING_ADD); in fdmon_io_uring_update()
146 * IORING_OP_POLL_REMOVE are async. We need to wait for the original in fdmon_io_uring_update()
155 * ctx->deleted_aio_handlers yet. Instead, manually fudge the list in fdmon_io_uring_update()
160 * handler on the real ctx->deleted_aio_handlers list to be freed. in fdmon_io_uring_update()
163 old_node->node_deleted.le_prev = &old_node->node_deleted.le_next; in fdmon_io_uring_update()
165 enqueue(&ctx->submit_list, old_node, FDMON_IO_URING_REMOVE); in fdmon_io_uring_update()
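
The assignment at file line 163 is the "fudge" the comment describes: le_prev is pointed at the node's own le_next field, so the handler looks inserted (le_prev != NULL, the test QEMU's QLIST_IS_INSERTED() uses) and a later list removal degenerates into a harmless no-op, even though the node is not on ctx->deleted_aio_handlers yet. A small demonstration of the trick with the BSD <sys/queue.h> macros, which QEMU's QLIST macros mirror; this is my reading of the excerpt, not QEMU code:

    #include <assert.h>
    #include <stddef.h>
    #include <sys/queue.h>

    struct handler {
        LIST_ENTRY(handler) node_deleted;
    };

    /* Same test QEMU's QLIST_IS_INSERTED() performs: le_prev != NULL */
    #define IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)

    int main(void)
    {
        struct handler h = { .node_deleted = { NULL, NULL } };

        assert(!IS_INSERTED(&h, node_deleted));

        /* The fudge: point le_prev at the entry's own le_next field */
        h.node_deleted.le_prev = &h.node_deleted.le_next;

        /* The handler now looks inserted ... */
        assert(IS_INSERTED(&h, node_deleted));

        /* ... and removing it is a no-op rather than a NULL dereference */
        LIST_REMOVE(&h, node_deleted);
        return 0;
    }
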
172 int events = poll_events_from_pfd(node->pfd.events); in add_poll_add_sqe()
174 io_uring_prep_poll_add(sqe, node->pfd.fd, events); in add_poll_add_sqe()
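
add_poll_add_sqe() converts the GPollFD event bits into the poll(2) mask that io_uring_prep_poll_add() expects. A plausible version of that conversion, shown for illustration rather than as QEMU's exact helper:

    #include <glib.h>
    #include <poll.h>

    /* Illustrative conversion from GPollFD.events (G_IO_*) to a poll(2) mask */
    static int poll_events_from_pfd(int pfd_events)
    {
        return (pfd_events & G_IO_IN  ? POLLIN  : 0) |
               (pfd_events & G_IO_OUT ? POLLOUT : 0) |
               (pfd_events & G_IO_HUP ? POLLHUP : 0) |
               (pfd_events & G_IO_ERR ? POLLERR : 0);
    }
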
190 /* Add a timeout that self-cancels when another cqe becomes ready */
191 static void add_timeout_sqe(AioContext *ctx, int64_t ns) in add_timeout_sqe() argument
195 .tv_sec = ns / NANOSECONDS_PER_SECOND, in add_timeout_sqe()
196 .tv_nsec = ns % NANOSECONDS_PER_SECOND, in add_timeout_sqe()
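
add_timeout_sqe() splits the nanosecond timeout into a struct __kernel_timespec (NANOSECONDS_PER_SECOND is 10^9 in QEMU). A self-contained sketch; the helper name and the error handling are assumptions, and it submits in the same function so the timespec is still live when the kernel copies it:

    #include <liburing.h>
    #include <stdint.h>

    #define NANOSECONDS_PER_SECOND 1000000000LL

    /* Illustrative: submit a timeout built from a nanosecond value and wait */
    static int wait_with_timeout(struct io_uring *ring, int64_t ns)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct __kernel_timespec ts = {
            .tv_sec = ns / NANOSECONDS_PER_SECOND,
            .tv_nsec = ns % NANOSECONDS_PER_SECOND,
        };

        if (!sqe) {
            return -1;
        }

        /* count=1: the timeout completes early once one other cqe is posted */
        io_uring_prep_timeout(sqe, &ts, 1, 0);

        /* Submit while ts is still in scope; the kernel reads it at submit time */
        return io_uring_submit_and_wait(ring, 1);
    }
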
204 /* Add sqes from ctx->submit_list for submission */
211 QSLIST_MOVE_ATOMIC(&submit_list, &ctx->submit_list); in fill_sq_ring()
242 flags = qatomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE); in process_cqe()
244 QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted); in process_cqe()
248 aio_add_ready_handler(ready_list, node, pfd_events_from_poll(cqe->res)); in process_cqe()
250 /* IORING_OP_POLL_ADD is one-shot so we must re-arm it */ in process_cqe()
257 struct io_uring *ring = &ctx->fdmon_io_uring; in process_cq_ring()
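
IORING_OP_POLL_ADD is one-shot, so every poll completion handled by process_cqe() must be followed by a fresh POLL_ADD, which is what the comment at file line 250 refers to. A simplified drain-and-re-arm loop; storing the fd directly in user_data (so fd 0 is not representable) and the elided dispatch step are assumptions of this sketch:

    #include <liburing.h>
    #include <poll.h>
    #include <stdint.h>

    /* Illustrative: reap every pending cqe and re-arm the one-shot polls */
    static void drain_and_rearm(struct io_uring *ring)
    {
        struct io_uring_cqe *cqe;
        unsigned head;
        unsigned seen = 0;

        io_uring_for_each_cqe(ring, head, cqe) {
            void *data = io_uring_cqe_get_data(cqe);

            seen++;

            /* Timeout cqes carry no user_data here; failed polls are skipped */
            if (!data || cqe->res < 0) {
                continue;
            }

            /* For poll cqes, res holds the ready events (POLLIN, POLLOUT, ...) */
            int fd = (int)(intptr_t)data;

            /* ... dispatch the ready handler for fd here ... */

            /* IORING_OP_POLL_ADD is one-shot, so queue a fresh poll for this fd */
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            if (sqe) {
                io_uring_prep_poll_add(sqe, fd, POLLIN);
                io_uring_sqe_set_data(sqe, data);
            }
        }

        io_uring_cq_advance(ring, seen);
    }
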
282 wait_nr = 0; /* non-blocking */ in fdmon_io_uring_wait()
290 ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr); in fdmon_io_uring_wait()
291 } while (ret == -EINTR); in fdmon_io_uring_wait()
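
fdmon_io_uring_wait() pushes the prepared sqes and optionally blocks in the same io_uring_enter(2) call: wait_nr is 0 for a non-blocking poll and 1 to sleep until at least one cqe is posted, and the call is restarted on EINTR. A minimal sketch of that step (the wrapper name is mine):

    #include <errno.h>
    #include <liburing.h>
    #include <stdint.h>

    /* Illustrative: submit queued sqes, blocking only when a timeout is wanted */
    static int submit_and_maybe_wait(struct io_uring *ring, int64_t timeout)
    {
        unsigned wait_nr = timeout == 0 ? 0 : 1; /* 0 = non-blocking poll */
        int ret;

        do {
            ret = io_uring_submit_and_wait(ring, wait_nr);
        } while (ret == -EINTR);

        return ret; /* number of sqes submitted, or a negative errno value */
    }
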
301 if (io_uring_cq_ready(&ctx->fdmon_io_uring)) { in fdmon_io_uring_need_wait()
306 if (io_uring_sq_ready(&ctx->fdmon_io_uring)) { in fdmon_io_uring_need_wait()
311 if (!QSLIST_EMPTY_RCU(&ctx->submit_list)) { in fdmon_io_uring_need_wait()
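
fdmon_io_uring_need_wait() reports pending work when any of three conditions holds: completions already sit in the cq ring, prepared sqes have not been submitted yet, or handlers are still queued on ctx->submit_list. Roughly, with the submit-list test reduced to a flag for illustration:

    #include <liburing.h>
    #include <stdbool.h>

    /* Illustrative: is another call into the wait function required? */
    static bool need_wait(struct io_uring *ring, bool submit_list_nonempty)
    {
        /* Completions already posted to the cq ring? */
        if (io_uring_cq_ready(ring)) {
            return true;
        }

        /* Prepared sqes that have not been submitted yet? */
        if (io_uring_sq_ready(ring)) {
            return true;
        }

        /* Handlers still queued for POLL_ADD/POLL_REMOVE submission? */
        return submit_list_nonempty;
    }
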
320 .wait = fdmon_io_uring_wait,
328 ret = io_uring_queue_init(FDMON_IO_URING_ENTRIES, &ctx->fdmon_io_uring, 0); in fdmon_io_uring_setup()
333 QSLIST_INIT(&ctx->submit_list); in fdmon_io_uring_setup()
334 ctx->fdmon_ops = &fdmon_io_uring_ops; in fdmon_io_uring_setup()
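
Setup amounts to io_uring_queue_init(); if it fails (for example -ENOSYS on kernels without io_uring) the AioContext keeps its previous fdmon ops. A sketch of that decision, with an illustrative entry count rather than QEMU's actual FDMON_IO_URING_ENTRIES value:

    #include <liburing.h>
    #include <stdbool.h>

    #define RING_ENTRIES 128 /* illustrative queue depth, not QEMU's constant */

    /* Illustrative: returns true if the io_uring backend can be used */
    static bool io_uring_backend_setup(struct io_uring *ring)
    {
        int ret = io_uring_queue_init(RING_ENTRIES, ring, 0);

        if (ret != 0) {
            /* e.g. -ENOSYS: keep using the poll-based monitoring backend */
            return false;
        }
        return true;
    }
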
340 if (ctx->fdmon_ops == &fdmon_io_uring_ops) { in fdmon_io_uring_destroy()
343 io_uring_queue_exit(&ctx->fdmon_io_uring); in fdmon_io_uring_destroy()
346 while ((node = QSLIST_FIRST_RCU(&ctx->submit_list))) { in fdmon_io_uring_destroy()
347 unsigned flags = qatomic_fetch_and(&node->flags, in fdmon_io_uring_destroy()
353 QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted); in fdmon_io_uring_destroy()
356 QSLIST_REMOVE_HEAD_RCU(&ctx->submit_list, node_submitted); in fdmon_io_uring_destroy()
359 ctx->fdmon_ops = &fdmon_poll_ops; in fdmon_io_uring_destroy()
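
Teardown mirrors setup: the ring is destroyed with io_uring_queue_exit(), nodes still sitting on the submission list have their flags cleared the same way the dequeue path does (handlers marked for removal are handed to the deleted list so they can be freed), and the context falls back to fdmon_poll_ops. A rough sketch reusing the simplified stand-in types from the flag sketch above, with the list handling reduced to a singly linked list:

    #include <liburing.h>
    #include <stdatomic.h>

    /* Simplified stand-ins, as in the flag sketch above */
    enum {
        FDMON_IO_URING_PENDING = 1 << 0,
        FDMON_IO_URING_ADD     = 1 << 1,
        FDMON_IO_URING_REMOVE  = 1 << 2,
    };

    struct node {
        atomic_uint flags;
        struct node *next;
    };

    /* Illustrative teardown of the ring plus the not-yet-submitted work */
    static void backend_destroy(struct io_uring *ring, struct node **submit_list)
    {
        io_uring_queue_exit(ring);

        while (*submit_list) {
            struct node *n = *submit_list;
            unsigned flags = atomic_fetch_and(&n->flags,
                                              ~(FDMON_IO_URING_PENDING |
                                                FDMON_IO_URING_ADD |
                                                FDMON_IO_URING_REMOVE));

            if (flags & FDMON_IO_URING_REMOVE) {
                /* QEMU moves these onto ctx->deleted_aio_handlers to be freed */
            }

            *submit_list = n->next; /* pop the head of the submission list */
        }

        /* The caller then switches back to the poll-based fdmon ops */
    }
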