// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership.
	 * Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
 * bump it and acquire ownership. It's disallowed to modify requests while not
 * owning it, which prevents races when enqueueing task_work and between
 * arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.
	 * If we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * is required, which happens on a spurious wakeup or when a multishot CQE
 * has already been served. IOU_POLL_DONE when it's done with the request,
 * then the mask is stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES
 * indicates to remove multishot poll and that the result is stored in
 * req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	int v;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_aux_cqe(req->ctx, *locked, req->cqe.user_data,
					mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
322329061d3SJens Axboe */ 32312ad3d2dSLin Ma } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & 32412ad3d2dSLin Ma IO_POLL_REF_MASK); 325329061d3SJens Axboe 3262ba69707SDylan Yudaken return IOU_POLL_NO_ACTION; 327329061d3SJens Axboe } 328329061d3SJens Axboe 329329061d3SJens Axboe static void io_poll_task_func(struct io_kiocb *req, bool *locked) 330329061d3SJens Axboe { 331329061d3SJens Axboe int ret; 332329061d3SJens Axboe 333329061d3SJens Axboe ret = io_poll_check_events(req, locked); 3342ba69707SDylan Yudaken if (ret == IOU_POLL_NO_ACTION) 335329061d3SJens Axboe return; 336443e5755SPavel Begunkov io_poll_remove_entries(req); 337443e5755SPavel Begunkov io_poll_tw_hash_eject(req, locked); 338329061d3SJens Axboe 339443e5755SPavel Begunkov if (req->opcode == IORING_OP_POLL_ADD) { 3402ba69707SDylan Yudaken if (ret == IOU_POLL_DONE) { 341443e5755SPavel Begunkov struct io_poll *poll; 342443e5755SPavel Begunkov 343443e5755SPavel Begunkov poll = io_kiocb_to_cmd(req, struct io_poll); 344329061d3SJens Axboe req->cqe.res = mangle_poll(req->cqe.res & poll->events); 3456e5aedb9SJens Axboe } else if (ret == IOU_POLL_REISSUE) { 3466e5aedb9SJens Axboe io_req_task_submit(req, locked); 3476e5aedb9SJens Axboe return; 348114eccdfSDylan Yudaken } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) { 349329061d3SJens Axboe req->cqe.res = ret; 350329061d3SJens Axboe req_set_fail(req); 351329061d3SJens Axboe } 352329061d3SJens Axboe 3530ec6dca2SPavel Begunkov io_req_set_res(req, req->cqe.res, 0); 3540ec6dca2SPavel Begunkov io_req_task_complete(req, locked); 355443e5755SPavel Begunkov } else { 356c06c6c5dSDylan Yudaken io_tw_lock(req->ctx, locked); 357329061d3SJens Axboe 358114eccdfSDylan Yudaken if (ret == IOU_POLL_REMOVE_POLL_USE_RES) 359c06c6c5dSDylan Yudaken io_req_task_complete(req, locked); 3606e5aedb9SJens Axboe else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE) 361329061d3SJens Axboe io_req_task_submit(req, locked); 362329061d3SJens Axboe else 363973fc83fSDylan Yudaken io_req_defer_failed(req, ret); 364329061d3SJens Axboe } 365443e5755SPavel Begunkov } 366329061d3SJens Axboe 36713a99017SPavel Begunkov static void __io_poll_execute(struct io_kiocb *req, int mask) 368329061d3SJens Axboe { 369329061d3SJens Axboe io_req_set_res(req, mask, 0); 370329061d3SJens Axboe req->io_task_work.func = io_poll_task_func; 371329061d3SJens Axboe 37248863ffdSPavel Begunkov trace_io_uring_task_add(req, mask); 373329061d3SJens Axboe io_req_task_work_add(req); 374329061d3SJens Axboe } 375329061d3SJens Axboe 37613a99017SPavel Begunkov static inline void io_poll_execute(struct io_kiocb *req, int res) 377329061d3SJens Axboe { 378329061d3SJens Axboe if (io_poll_get_ownership(req)) 37913a99017SPavel Begunkov __io_poll_execute(req, res); 380329061d3SJens Axboe } 381329061d3SJens Axboe 382329061d3SJens Axboe static void io_poll_cancel_req(struct io_kiocb *req) 383329061d3SJens Axboe { 384329061d3SJens Axboe io_poll_mark_cancelled(req); 385329061d3SJens Axboe /* kick tw, which should complete the request */ 38613a99017SPavel Begunkov io_poll_execute(req, 0); 387329061d3SJens Axboe } 388329061d3SJens Axboe 389329061d3SJens Axboe #define IO_ASYNC_POLL_COMMON (EPOLLONESHOT | EPOLLPRI) 390329061d3SJens Axboe 391fe991a76SJens Axboe static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll) 392329061d3SJens Axboe { 393329061d3SJens Axboe io_poll_mark_cancelled(req); 394329061d3SJens Axboe /* we have to kick tw in case it's not already */ 39513a99017SPavel Begunkov io_poll_execute(req, 0); 

	/*
	 * If the waitqueue is being freed early but someone already
	 * holds ownership over it, we have to tear down the request as
	 * best we can. That means immediately removing the request from
	 * its waitqueue and preventing all further accesses to the
	 * waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code.
 * When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

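	/*
	 * Edge-triggered and an event is already pending: the request is
	 * hashed for cancellation, so punt posting the completion to tw.
	 */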
	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops in repeated poll triggers and issue
 * subsequently failing. But rather than fail these immediately, allow a
 * certain amount of retries before we give up. Given that this condition
 * should _rarely_ trigger even once, we should be fine with a larger value.
 */
#define APOLL_MAX_RETRY	128

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
		apoll->poll.retries = APOLL_MAX_RETRY;
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
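	/*
	 * Non-zero return: either arming failed (ret < 0) or an event was
	 * already there (ret > 0); in both cases no poll entry stays armed.
	 */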
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

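/*
 * Scan every hash bucket for a request matching the cancel descriptor by
 * file (or any request when IORING_ASYNC_CANCEL_ANY is set). On a match the
 * bucket lock is left held and handed back through @out_bucket.
 */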
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only mask one event flags, keep behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}