Lines matching full:req — uses of the req identifier in the Linux kernel's io_uring poll code (io_uring/poll.c); the leading numbers are source line numbers there.
34 struct io_kiocb *req; member
71 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
80 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
83 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
92 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
94 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
95 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
96 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
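The lines above implement the poll ownership scheme: the low bits of req->poll_refs (IO_POLL_REF_MASK) count outstanding wakeups, and whichever path increments the counter from zero becomes the owner responsible for processing events on behalf of everyone else. Once the counter reaches IO_POLL_REF_BIAS, the slowpath stops incrementing and sets IO_POLL_RETRY_FLAG instead, so the count can never overflow into the flag bits. A minimal userspace model with C11 atomics, using a bit layout that mirrors the kernel's (an assumption of this sketch; the authoritative values live in io_uring/poll.c):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define IO_POLL_CANCEL_FLAG	(1u << 31)
#define IO_POLL_RETRY_FLAG	(1u << 30)
#define IO_POLL_REF_MASK	((1u << 30) - 1)	/* GENMASK(29, 0) */
#define IO_POLL_REF_BIAS	128

struct model_req {
	_Atomic uint32_t poll_refs;
};

/* Slowpath: refs are already elevated and ownership is unlikely, so
 * set a retry flag instead of incrementing, preventing the counter
 * from ever overflowing into the flag bits. */
static bool get_ownership_slowpath(struct model_req *req)
{
	uint32_t v = atomic_fetch_or(&req->poll_refs, IO_POLL_RETRY_FLAG);

	if (v & IO_POLL_REF_MASK)
		return false;			/* another owner exists */
	return !(atomic_fetch_add(&req->poll_refs, 1) & IO_POLL_REF_MASK);
}

/* Whoever bumps poll_refs from zero wins ownership and must process
 * events for every wakeup that piled up behind it. */
static bool get_ownership(struct model_req *req)
{
	if (atomic_load(&req->poll_refs) >= IO_POLL_REF_BIAS)
		return get_ownership_slowpath(req);
	return !(atomic_fetch_add(&req->poll_refs, 1) & IO_POLL_REF_MASK);
}
```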
99 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
101 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
104 static struct io_poll *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
107 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
108 return req->async_data; in io_poll_get_double()
109 return req->apoll->double_poll; in io_poll_get_double()
112 static struct io_poll *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
114 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
115 return io_kiocb_to_cmd(req, struct io_poll); in io_poll_get_single()
116 return &req->apoll->poll; in io_poll_get_single()
119 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
121 struct io_hash_table *table = &req->ctx->cancel_table; in io_poll_req_insert()
122 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert()
126 hlist_add_head(&req->hash_node, &hb->list); in io_poll_req_insert()
130 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_poll_req_delete() argument
132 struct io_hash_table *table = &req->ctx->cancel_table; in io_poll_req_delete()
133 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_delete()
137 hash_del(&req->hash_node); in io_poll_req_delete()
141 static void io_poll_req_insert_locked(struct io_kiocb *req) in io_poll_req_insert_locked() argument
143 struct io_hash_table *table = &req->ctx->cancel_table_locked; in io_poll_req_insert_locked()
144 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert_locked()
146 lockdep_assert_held(&req->ctx->uring_lock); in io_poll_req_insert_locked()
148 hlist_add_head(&req->hash_node, &table->hbs[index].list); in io_poll_req_insert_locked()
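Both insertion paths hash the request by its completion user_data so a later IORING_OP_POLL_REMOVE or async cancel can find it: cancel_table buckets carry their own spinlock, while cancel_table_locked relies on the ring's uring_lock (hence the lockdep assertion above). A sketch of the bucket selection, assuming hash_long() is the kernel's 64-bit multiplicative hash:

```c
#include <stdint.h>

/* Multiplicative (Fibonacci) hash over user_data folded to hash_bits,
 * mirroring hash_long(req->cqe.user_data, table->hash_bits). */
static uint32_t cancel_bucket(uint64_t user_data, unsigned int hash_bits)
{
	const uint64_t GOLDEN_RATIO_64 = 0x61C8864680B583EBull;

	return (uint32_t)((user_data * GOLDEN_RATIO_64) >> (64 - hash_bits));
}
```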
151 static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts) in io_poll_tw_hash_eject() argument
153 struct io_ring_ctx *ctx = req->ctx; in io_poll_tw_hash_eject()
155 if (req->flags & REQ_F_HASH_LOCKED) { in io_poll_tw_hash_eject()
163 hash_del(&req->hash_node); in io_poll_tw_hash_eject()
164 req->flags &= ~REQ_F_HASH_LOCKED; in io_poll_tw_hash_eject()
166 io_poll_req_delete(req, ctx); in io_poll_tw_hash_eject()
192 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
198 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL))) in io_poll_remove_entries()
217 if (req->flags & REQ_F_SINGLE_POLL) in io_poll_remove_entries()
218 io_poll_remove_entry(io_poll_get_single(req)); in io_poll_remove_entries()
219 if (req->flags & REQ_F_DOUBLE_POLL) in io_poll_remove_entries()
220 io_poll_remove_entry(io_poll_get_double(req)); in io_poll_remove_entries()
232 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
234 io_req_set_res(req, mask, 0); in __io_poll_execute()
235 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
237 trace_io_uring_task_add(req, mask); in __io_poll_execute()
238 io_req_task_work_add(req); in __io_poll_execute()
241 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
243 if (io_poll_get_ownership(req)) in io_poll_execute()
244 __io_poll_execute(req, res); in io_poll_execute()
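io_poll_execute() is the losers-do-nothing half of the ownership scheme: each caller tries to take ownership, and only the winner queues task work, while a losing caller's increment is simply left behind for the owner to notice. Continuing the model above (schedule_task_work() is a hypothetical stand-in for io_req_task_work_add()):

```c
/* Hypothetical stand-in for io_req_task_work_add(): would hand the
 * request to task context for processing. */
static void schedule_task_work(struct model_req *req, int res)
{
	(void)req;
	(void)res;
}

/* Only the ownership winner queues task work; a loser's increment is
 * left behind for the owner's event loop to consume. */
static void model_poll_execute(struct model_req *req, int res)
{
	if (get_ownership(req))
		schedule_task_work(req, res);
}
```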
254 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
255 * poll and that the result is stored in req->cqe.
257 static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts) in io_poll_check_events() argument
261 /* req->task == current here, checking PF_EXITING is safe */ in io_poll_check_events()
262 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
266 v = atomic_read(&req->poll_refs); in io_poll_check_events()
280 req->cqe.res = 0; in io_poll_check_events()
283 req->cqe.res = 0; in io_poll_check_events()
289 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
295 if (!req->cqe.res) { in io_poll_check_events()
296 struct poll_table_struct pt = { ._key = req->apoll_events }; in io_poll_check_events()
297 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events; in io_poll_check_events()
304 if (unlikely(!req->cqe.res)) { in io_poll_check_events()
306 if (!(req->apoll_events & EPOLLONESHOT)) in io_poll_check_events()
311 if (unlikely(req->cqe.res & EPOLLERR)) in io_poll_check_events()
312 req_set_fail(req); in io_poll_check_events()
313 if (req->apoll_events & EPOLLONESHOT) in io_poll_check_events()
317 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_poll_check_events()
318 __poll_t mask = mangle_poll(req->cqe.res & in io_poll_check_events()
319 req->apoll_events); in io_poll_check_events()
321 if (!io_fill_cqe_req_aux(req, ts->locked, mask, in io_poll_check_events()
323 io_req_set_res(req, mask, 0); in io_poll_check_events()
327 int ret = io_poll_issue(req, ts); in io_poll_check_events()
337 req->cqe.res = 0; in io_poll_check_events()
343 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & in io_poll_check_events()
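io_poll_check_events() is that owner's event loop, running in task context. It snapshots poll_refs, bails on PF_EXITING or the cancel flag, re-polls via vfs_poll() when the wakeup stashed no mask, posts a CQE for multishot poll, and finally drops the references it observed with atomic_sub_return(); a nonzero remainder means more wakeups arrived in the meantime and the loop goes around again. A simplified sketch of that drain loop, reusing the model above (handle_event() is hypothetical, and the retry-flag handling is condensed):

```c
#include <errno.h>

/* Hypothetical event processing (vfs_poll + CQE posting in the kernel). */
static void handle_event(struct model_req *req)
{
	(void)req;
}

/* Owner-side drain loop: process, then drop the refs we observed.
 * C11 fetch_sub returns the old value, so subtract once more to get
 * the kernel's atomic_sub_return() result. */
static int model_check_events(struct model_req *req)
{
	uint32_t v, refs;

	do {
		v = atomic_load(&req->poll_refs);
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;
		if (v & IO_POLL_RETRY_FLAG)	/* counter saturated: re-poll */
			atomic_fetch_and(&req->poll_refs, ~IO_POLL_RETRY_FLAG);
		handle_event(req);
		refs = v & IO_POLL_REF_MASK;
	} while ((atomic_fetch_sub(&req->poll_refs, refs) - refs) &
		 IO_POLL_REF_MASK);
	return 0;
}
```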
349 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts) in io_poll_task_func() argument
353 ret = io_poll_check_events(req, ts); in io_poll_task_func()
355 io_kbuf_recycle(req, 0); in io_poll_task_func()
358 io_kbuf_recycle(req, 0); in io_poll_task_func()
359 __io_poll_execute(req, 0); in io_poll_task_func()
362 io_poll_remove_entries(req); in io_poll_task_func()
363 io_poll_tw_hash_eject(req, ts); in io_poll_task_func()
365 if (req->opcode == IORING_OP_POLL_ADD) { in io_poll_task_func()
369 poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_task_func()
370 req->cqe.res = mangle_poll(req->cqe.res & poll->events); in io_poll_task_func()
372 io_req_task_submit(req, ts); in io_poll_task_func()
375 req->cqe.res = ret; in io_poll_task_func()
376 req_set_fail(req); in io_poll_task_func()
379 io_req_set_res(req, req->cqe.res, 0); in io_poll_task_func()
380 io_req_task_complete(req, ts); in io_poll_task_func()
382 io_tw_lock(req->ctx, ts); in io_poll_task_func()
385 io_req_task_complete(req, ts); in io_poll_task_func()
387 io_req_task_submit(req, ts); in io_poll_task_func()
389 io_req_defer_failed(req, ret); in io_poll_task_func()
393 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
395 io_poll_mark_cancelled(req); in io_poll_cancel_req()
397 io_poll_execute(req, 0); in io_poll_cancel_req()
402 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll) in io_pollfree_wake() argument
404 io_poll_mark_cancelled(req); in io_pollfree_wake()
406 io_poll_execute(req, 0); in io_pollfree_wake()
419 * as req->head is NULL'ed out, the request can be in io_pollfree_wake()
430 struct io_kiocb *req = wqe_to_req(wait); in io_poll_wake() local
435 return io_pollfree_wake(req, poll); in io_poll_wake()
441 if (io_poll_get_ownership(req)) { in io_poll_wake()
455 req->flags &= ~REQ_F_DOUBLE_POLL; in io_poll_wake()
457 req->flags &= ~REQ_F_SINGLE_POLL; in io_poll_wake()
459 __io_poll_execute(req, mask); in io_poll_wake()
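io_poll_wake() runs under the waitqueue lock, possibly in IRQ context. A POLLFREE key means the waitqueue head itself is being freed, so io_pollfree_wake() marks the request cancelled, kicks task work, unlinks the wait entry, and only then publishes poll->head = NULL, because that store (as the comment at source line 419 notes) is the point after which the request may be completed and freed. A sketch of that publish/observe pairing in C11 (types here are placeholders):

```c
#include <stdatomic.h>
#include <stddef.h>

struct wait_head;				/* opaque placeholder */

struct model_poll {
	struct wait_head *_Atomic head;		/* queue we are on, or NULL */
};

/* POLLFREE teardown: NULLing head must be the *last* step and needs
 * release ordering, since observers that see NULL stop taking the
 * waitqueue lock and the entry may then be freed at any moment. */
static void model_pollfree(struct model_poll *poll)
{
	/* ... unlink the wait entry under the queue lock first ... */
	atomic_store_explicit(&poll->head, NULL, memory_order_release);
}

/* The removal side pairs with an acquire load before it dares touch
 * the head's lock. */
static struct wait_head *model_poll_head(struct model_poll *poll)
{
	return atomic_load_explicit(&poll->head, memory_order_acquire);
}
```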
465 static bool io_poll_double_prepare(struct io_kiocb *req) in io_poll_double_prepare() argument
468 struct io_poll *poll = io_poll_get_single(req); in io_poll_double_prepare()
474 * poll arm might not hold ownership and so race for req->flags with in io_poll_double_prepare()
481 req->flags |= REQ_F_DOUBLE_POLL; in io_poll_double_prepare()
482 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_double_prepare()
483 req->flags |= REQ_F_ASYNC_DATA; in io_poll_double_prepare()
494 struct io_kiocb *req = pt->req; in __io_queue_proc() local
495 unsigned long wqe_private = (unsigned long) req; in __io_queue_proc()
525 if (!io_poll_double_prepare(req)) { in __io_queue_proc()
533 req->flags |= REQ_F_SINGLE_POLL; in __io_queue_proc()
550 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll); in io_poll_queue_proc()
553 (struct io_poll **) &pt->req->async_data); in io_poll_queue_proc()
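__io_queue_proc() is called back from vfs_poll() once per waitqueue the file registers. A second call means the file uses two queues (a socket's read and write queues, say), so a mirror io_poll entry is allocated into double_poll (stored as async_data for IORING_OP_POLL_ADD) and REQ_F_DOUBLE_POLL is set; io_poll_double_prepare() takes the first queue's lock while flipping flags because a concurrent wakeup may already own the request and race on req->flags. The wait entry's private word carries the req pointer, with the low bit tagging which of the two entries fired, mirroring the kernel's wqe_to_req()/wqe_is_double() and IO_WQE_F_DOUBLE; a sketch:

```c
#include <stdbool.h>
#include <stdint.h>

#define MODEL_WQE_F_DOUBLE	1ul

/* Pointer tagging: the io_kiocb is aligned, so the low bit of the
 * wait entry's private word is free to mark the second entry. */
static uintptr_t req_to_wqe(struct model_req *req, bool is_double)
{
	return (uintptr_t)req | (is_double ? MODEL_WQE_F_DOUBLE : 0);
}

static struct model_req *wqe_to_req(uintptr_t priv)
{
	return (struct model_req *)(priv & ~MODEL_WQE_F_DOUBLE);
}

static bool wqe_is_double(uintptr_t priv)
{
	return priv & MODEL_WQE_F_DOUBLE;
}
```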
556 static bool io_poll_can_finish_inline(struct io_kiocb *req, in io_poll_can_finish_inline() argument
559 return pt->owning || io_poll_get_ownership(req); in io_poll_can_finish_inline()
562 static void io_poll_add_hash(struct io_kiocb *req) in io_poll_add_hash() argument
564 if (req->flags & REQ_F_HASH_LOCKED) in io_poll_add_hash()
565 io_poll_req_insert_locked(req); in io_poll_add_hash()
567 io_poll_req_insert(req); in io_poll_add_hash()
576 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
581 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
583 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
584 req->work.cancel_seq = atomic_read(&ctx->cancel_seq); in __io_arm_poll_handler()
586 poll->file = req->file; in __io_arm_poll_handler()
587 req->apoll_events = poll->events; in __io_arm_poll_handler()
590 ipt->req = req; in __io_arm_poll_handler()
605 atomic_set(&req->poll_refs, (int)ipt->owning); in __io_arm_poll_handler()
609 req->flags &= ~REQ_F_HASH_LOCKED; in __io_arm_poll_handler()
611 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
614 io_poll_remove_entries(req); in __io_arm_poll_handler()
616 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
617 io_poll_mark_cancelled(req); in __io_arm_poll_handler()
628 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
629 io_poll_add_hash(req); in __io_arm_poll_handler()
632 io_poll_remove_entries(req); in __io_arm_poll_handler()
634 /* no one else has access to the req, forget about the ref */ in __io_arm_poll_handler()
638 io_poll_add_hash(req); in __io_arm_poll_handler()
641 io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
642 __io_poll_execute(req, mask); in __io_arm_poll_handler()
651 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
652 __io_poll_execute(req, 0); in __io_arm_poll_handler()
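__io_arm_poll_handler() does the arming in one shot: vfs_poll() both queues the request on the file's waitqueues (through the queue proc above) and returns the currently ready mask. If the mask already satisfies the request and the arming path can finish inline, the entries are removed and the request completes without ever touching the hash; otherwise it is hashed for cancellation and the arming reference is dropped. That final drop uses atomic_cmpxchg(poll_refs, 1, 0): failure means a wakeup sneaked in during arming, so the events must be processed once on its behalf. Continuing the model (the kernel queues task work via __io_poll_execute() here rather than calling the loop directly):

```c
/* Arming-side ref drop: try to move poll_refs from 1 back to 0; if
 * that fails, a wakeup raced with arming and left a reference, so
 * run the event loop once to consume it. */
static void model_finish_arming(struct model_req *req)
{
	uint32_t expected = 1;

	if (!atomic_compare_exchange_strong(&req->poll_refs, &expected, 0))
		model_check_events(req);
}
```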
661 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
674 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, in io_req_alloc_apoll() argument
677 struct io_ring_ctx *ctx = req->ctx; in io_req_alloc_apoll()
681 if (req->flags & REQ_F_POLLED) { in io_req_alloc_apoll()
682 apoll = req->apoll; in io_req_alloc_apoll()
698 req->apoll = apoll; in io_req_alloc_apoll()
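io_req_alloc_apoll() avoids an allocation per arm: a request that was already polled (REQ_F_POLLED) re-arms with its existing apoll, and otherwise one is taken from a per-ring cache before falling back to the allocator. The shape of that lookup, sketched with a plain free list:

```c
#include <stdbool.h>
#include <stdlib.h>

struct model_apoll { struct model_apoll *next; };
struct model_cache { struct model_apoll *free_list; };

/* Reuse the previous apoll when re-arming, else pop from the cache,
 * else heap-allocate (the kernel falls back to kmalloc() here). */
static struct model_apoll *alloc_apoll(struct model_cache *cache,
				       struct model_apoll *prev,
				       bool polled_before)
{
	if (polled_before && prev)
		return prev;
	if (cache->free_list) {
		struct model_apoll *a = cache->free_list;

		cache->free_list = a->next;
		return a;
	}
	return malloc(sizeof(struct model_apoll));
}
```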
704 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) in io_arm_poll_handler() argument
706 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in io_arm_poll_handler()
716 req->flags |= REQ_F_HASH_LOCKED; in io_arm_poll_handler()
720 if (!file_can_poll(req->file)) in io_arm_poll_handler()
722 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) in io_arm_poll_handler()
729 if (req->flags & REQ_F_CLEAR_POLLIN) in io_arm_poll_handler()
737 apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
740 req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL); in io_arm_poll_handler()
741 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
744 io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
746 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
749 trace_io_uring_poll_arm(req, mask, apoll->poll.events); in io_arm_poll_handler()
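io_arm_poll_handler() is the internal fast-poll path: when a pollable request (a recv, say) cannot proceed, it is parked on the file's waitqueue instead of being punted to an io-wq worker, then reissued from task work once readiness arrives. The event mask is derived from the opcode definition: pollin vs. pollout, EPOLLONESHOT unless the request is multishot, EPOLLEXCLUSIVE when the definition asks for it, and EPOLLIN masked off for REQ_F_CLEAR_POLLIN (MSG_ERRQUEUE reads). A sketch of that selection (the exact base bits are an assumption):

```c
#include <stdbool.h>
#include <sys/epoll.h>

/* Build the fast-poll interest mask from per-opcode properties. */
static unsigned int fastpoll_mask(bool pollin, bool poll_exclusive,
				  bool multishot, bool clear_pollin)
{
	unsigned int mask = EPOLLERR | EPOLLPRI;

	if (!multishot)
		mask |= EPOLLONESHOT;
	if (pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;
		if (clear_pollin)	/* e.g. recvmsg() on MSG_ERRQUEUE */
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (poll_exclusive)
		mask |= EPOLLEXCLUSIVE;
	return mask;
}
```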
759 struct io_kiocb *req; in io_poll_remove_all_table() local
767 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) { in io_poll_remove_all_table()
768 if (io_match_task_safe(req, tsk, cancel_all)) { in io_poll_remove_all_table()
769 hlist_del_init(&req->hash_node); in io_poll_remove_all_table()
770 io_poll_cancel_req(req); in io_poll_remove_all_table()
798 struct io_kiocb *req; in io_poll_find() local
805 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_find()
806 if (cd->data != req->cqe.user_data) in io_poll_find()
808 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
811 if (cd->seq == req->work.cancel_seq) in io_poll_find()
813 req->work.cancel_seq = cd->seq; in io_poll_find()
816 return req; in io_poll_find()
828 struct io_kiocb *req; in io_poll_file_find() local
837 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_file_find()
838 if (io_cancel_req_match(req, cd)) { in io_poll_file_find()
840 return req; in io_poll_file_find()
848 static int io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
850 if (!req) in io_poll_disarm()
852 if (!io_poll_get_ownership(req)) in io_poll_disarm()
854 io_poll_remove_entries(req); in io_poll_disarm()
855 hash_del(&req->hash_node); in io_poll_disarm()
863 struct io_kiocb *req; in __io_poll_cancel() local
867 req = io_poll_file_find(ctx, cd, table, &bucket); in __io_poll_cancel()
869 req = io_poll_find(ctx, false, cd, table, &bucket); in __io_poll_cancel()
871 if (req) in __io_poll_cancel()
872 io_poll_cancel_req(req); in __io_poll_cancel()
875 return req ? 0 : -ENOENT; in __io_poll_cancel()
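Cancellation reverses the hashing: io_poll_find() walks the bucket for cd->data, skipping entries whose work.cancel_seq shows they were already visited in this cancel pass, while io_poll_file_find() scans the buckets for file/any-style matches; the hit is then cancelled through the same mark-and-kick ownership path as teardown. A sketch of the keyed lookup, reusing cancel_bucket() from the sketch above (the list layout is illustrative):

```c
#include <stddef.h>
#include <stdint.h>

struct hashed_req {
	uint64_t user_data;
	struct hashed_req *next;
};

struct model_table {
	struct hashed_req **buckets;
	unsigned int hash_bits;
};

/* Keyed cancel lookup: one bucket, first user_data match wins. */
static struct hashed_req *model_poll_find(struct model_table *table,
					  uint64_t user_data)
{
	uint32_t idx = cancel_bucket(user_data, table->hash_bits);
	struct hashed_req *req;

	for (req = table->buckets[idx]; req; req = req->next) {
		if (req->user_data == user_data)
			return req;
	}
	return NULL;
}
```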
910 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_remove_prep() argument
912 struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove_prep()
940 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
942 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add_prep()
950 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP)) in io_poll_add_prep()
957 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
959 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add()
969 if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER)) in io_poll_add()
970 req->flags |= REQ_F_HASH_LOCKED; in io_poll_add()
972 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); in io_poll_add()
974 io_req_set_res(req, ipt.result_mask, 0); in io_poll_add()
980 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) in io_poll_remove() argument
982 struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove()
983 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
1041 req_set_fail(req); in io_poll_remove()
1045 io_req_set_res(req, ret, 0); in io_poll_remove()
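From userspace, all of the above is driven by IORING_OP_POLL_ADD and IORING_OP_POLL_REMOVE, where the user_data of the original poll SQE is the cancellation key that ends up hashed into cancel_table. A minimal liburing example, assuming liburing 2.2+ (where io_uring_prep_poll_remove() takes a 64-bit user_data) and omitting error handling:

```c
/* Build with: cc poll_demo.c -o poll_demo -luring */
#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	io_uring_queue_init(8, &ring, 0);

	/* one-shot poll for readability on stdin */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, STDIN_FILENO, POLLIN);
	io_uring_sqe_set_data64(sqe, 0x1234);	/* key for cancellation */
	io_uring_submit(&ring);

	/* cancel it by user_data instead of waiting for readiness */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_remove(sqe, 0x1234);
	io_uring_submit(&ring);

	/* reap both CQEs: the cancelled poll completes with -ECANCELED */
	for (int i = 0; i < 2; i++) {
		io_uring_wait_cqe(&ring, &cqe);
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
```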