// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

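/*
 * The low bit of a poll wait entry's ->private tags entries that belong to
 * the second (double) poll; the remaining bits hold the owning io_kiocb.
 */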
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
 * bump it and acquire ownership. It's disallowed to modify requests while not
 * owning it, which prevents races when enqueueing task_work and between
 * arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

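/*
 * Queue the request in ->cancel_table so cancellation can find it; buckets
 * are keyed by hashing cqe.user_data and protected by per-bucket spinlocks.
 */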
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, ts);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

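/*
 * Prepare a poll entry: clear the head and register io_poll_wake() as the
 * waitqueue wake callback.
 */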
static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

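/*
 * Unhook a single poll entry from its waitqueue. The acquire load of
 * poll->head pairs with the release store on the POLLFREE path in
 * io_pollfree_wake().
 */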
static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,
};

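/*
 * Stash the triggered mask in req->cqe.res and queue task_work to process
 * the poll event; the caller must already hold poll_refs ownership.
 */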
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * is required, which means either a spurious wakeup or that a multishot CQE
 * has been served. IOU_POLL_DONE when it's done with the request, then the
 * mask is stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to
 * remove multishot poll and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
	int v;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (unlikely(req->cqe.res & EPOLLERR))
			req_set_fail(req);
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_fill_cqe_req_aux(req, ts->locked, mask,
						 IORING_CQE_F_MORE)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, ts);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			else if (ret == IOU_REQUEUE)
				return IOU_POLL_REQUEUE;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
					IO_POLL_REF_MASK);

	return IOU_POLL_NO_ACTION;
}

void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret;

	ret = io_poll_check_events(req, ts);
	if (ret == IOU_POLL_NO_ACTION) {
		io_kbuf_recycle(req, 0);
		return;
	} else if (ret == IOU_POLL_REQUEUE) {
		io_kbuf_recycle(req, 0);
		__io_poll_execute(req, 0);
		return;
	}
	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, ts);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(req, ts);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(req, ts);
	} else {
		io_tw_lock(req->ctx, ts);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, ts);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(req, ts);
		else
			io_req_defer_failed(req, ret);
	}
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we can.
	 * That means immediately removing the request from its waitqueue and
	 * preventing all further accesses to the waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

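/*
 * Waitqueue wake callback. Filters out uninteresting events, handles
 * POLLFREE teardown, and, if it can grab ownership, queues task_work to
 * process the event.
 */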
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

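/*
 * poll_table callback for IORING_OP_POLL_ADD; a second (double) poll entry
 * is stashed in ->async_data.
 */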
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

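/*
 * Inline completion is only allowed if we already took ownership at arm
 * time or can still grab it now; otherwise a queued wakeup owns the request.
 */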
static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

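/* poll_table callback for internally armed (apoll) poll-driven retry */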
static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll trigger and the subsequent
 * issue repeatedly fail. But rather than fail these immediately, allow a
 * certain amount of retries before we give up. Given that this condition
 * should _rarely_ trigger even once, we should be fine with a larger value.
 */
#define APOLL_MAX_RETRY	128

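/*
 * Get an async_poll entry for this request: reuse the one from a previous
 * arm, take one from the per-ctx cache when the ring lock is held, or fall
 * back to a GFP_ATOMIC allocation. Returns NULL once the retry budget is
 * exhausted.
 */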
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
		apoll->poll.retries = APOLL_MAX_RETRY;
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

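/*
 * Arm poll-driven retry for an opcode that would otherwise block: build the
 * event mask from the opcode definition and hand the request over to
 * __io_arm_poll_handler(). Returns IO_APOLL_OK, IO_APOLL_READY or
 * IO_APOLL_ABORTED.
 */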
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

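/*
 * Look up a hashed poll request by user_data. On success the matching bucket
 * is returned locked via @out_bucket and must be unlocked by the caller.
 */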
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd)) {
				*out_bucket = hb;
				return req;
			}
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

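/*
 * Take ownership of a found poll request and unhook it from its waitqueues
 * and the cancellation hash, so the caller can update or cancel it.
 */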
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

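/*
 * Translate the userspace poll32_events field into kernel __poll_t form,
 * adding EPOLLONESHOT and/or EPOLLET unless multishot or level-triggered
 * behaviour was requested.
 */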
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask bits, keep behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	preq->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(preq);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}