// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128
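
/*
 * For reference, the resulting layout of ->poll_refs:
 *
 *   bit 31     IO_POLL_CANCEL_FLAG - the request is being cancelled
 *   bit 30     IO_POLL_RETRY_FLAG  - a wakeup arrived while refs were
 *                                    elevated, re-check events before
 *                                    releasing ownership
 *   bits 0-29  reference count; 0 means the request is unowned and free
 *              to be claimed
 */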

#define IO_WQE_F_DOUBLE		1

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free: we can bump the refs and acquire ownership. Modifying a request while
 * not owning it is disallowed, which prevents races both when enqueueing
 * task_work and between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}
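
/*
 * Poll requests are hashed by CQE user_data so cancellation can find them
 * again. There are two tables: ->cancel_table, with a spinlock per bucket,
 * and ->cancel_table_locked, protected by ->uring_lock, used for requests
 * whose completion path holds the lock anyway (REQ_F_HASH_LOCKED).
 */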
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}
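
/*
 * ->head is written under the waitqueue lock; io_pollfree_wake() clears it
 * with smp_store_release(), which pairs with the acquire load below so that
 * a non-NULL head observed under RCU is still safe to lock.
 */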
static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. Returns IOU_POLL_NO_ACTION when no
 * action is required, either because of a spurious wakeup or because a
 * multishot CQE has already been served. Returns IOU_POLL_DONE when it's
 * done with the request, with the mask stored in req->cqe.res.
 * IOU_POLL_REMOVE_POLL_USE_RES indicates that multishot poll should be
 * removed and that the result is already stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_DONE;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_aux_cqe(req->ctx, *locked, req->cqe.user_data,
					mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
					IO_POLL_REF_MASK);

	return IOU_POLL_NO_ACTION;
}
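
/*
 * Example of the release loop above: with a snapshot of v == 1, a wakeup
 * firing while the tw runs bumps poll_refs to 2 (see io_poll_get_ownership());
 * atomic_sub_return() then leaves 1 in IO_POLL_REF_MASK, so ownership is
 * retained and the loop re-checks events instead of losing that wakeup.
 */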

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	if (ret == IOU_POLL_DONE) {
		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	io_tw_lock(req->ctx, locked);
	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
		io_req_task_complete(req, locked);
	else if (ret == IOU_POLL_DONE)
		io_req_task_submit(req, locked);
	else
		io_req_defer_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);

	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}
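
/*
 * Bits masked out of poll->events when io_poll_wake() checks whether a
 * wakeup's mask actually matches the events this request is armed for.
 */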
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from its
	 * waitqueue and preventing all further accesses to the waitqueue
	 * via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as poll->head is NULL'ed out, the request can be
	 * completed and freed, since the poll tw handler
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already completing via the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt))
			return 0;
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
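
	/*
	 * If an event is already pending and the poll is edge-triggered,
	 * hand the mask to task_work right away rather than waiting for
	 * the next edge.
	 */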
	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
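
/*
 * Allocate an async_poll entry. With the submission lock held, entries are
 * recycled through ctx->apoll_cache; io-wq (IO_URING_F_UNLOCKED) can't touch
 * the cache and falls back to a plain GFP_ATOMIC allocation.
 */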
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}
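
/*
 * Note that the lookup helpers above return with the matching bucket lock
 * held on success; the caller unlocks it once done with the request.
 * Cancellation first probes ->cancel_table and only then retries
 * ->cancel_table_locked under the submission lock, see io_poll_cancel().
 */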
static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}
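
/*
 * SQE layout for IORING_OP_POLL_REMOVE (poll update/cancel): ->addr holds
 * the user_data of the target poll request, ->len the IORING_POLL_UPDATE_*
 * flags, ->off an optional replacement user_data and ->poll32_events a
 * replacement event mask.
 */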
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}
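
/*
 * For illustration only (not part of this file): arming a multishot poll
 * from userspace with liburing would look roughly like the sketch below;
 * names follow liburing and the fd is a placeholder.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data64(sqe, 0xcafe);
 *	io_uring_submit(&ring);
 *	// each readiness event posts a CQE with IORING_CQE_F_MORE set,
 *	// until the poll is cancelled or terminated by an error
 */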

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the low event bits, keep the behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}