// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t			result_mask;
};

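/*
 * ->poll_refs layout:
 *
 *	bit  31    IO_POLL_CANCEL_FLAG: the request is being cancelled
 *	bit  30    IO_POLL_RETRY_FLAG:  an ownership grab was contended
 *	bits 0-29  reference count, non-zero while someone owns the request
 *
 * Typical flow: io_poll_wake() grabs ownership (refs 0 -> 1) and queues
 * task_work; a concurrent wakeup only bumps the count (1 -> 2). The tw
 * handler later drops all refs it observed in one go and loops again if
 * the count didn't reach zero, so no wakeup is lost.
 */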
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

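/*
 * struct io_kiocb is at least pointer-aligned, so bit 0 of the wait queue
 * entry's ->private field is free to use as a tag. It marks entries that
 * belong to the second (double) poll entry, e.g. a request at address
 * 0x...40 is stored as 0x...41 in a double entry's ->private.
 */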
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump the refs to acquire ownership. It's disallowed to
 * modify a request while not owning it, which prevents races when enqueueing
 * task_work and between poll arming and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

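/*
 * Cancellation lookups hash requests by their CQE user_data. There are two
 * tables: ->cancel_table with per-bucket spinlocks, and
 * ->cancel_table_locked, which is protected by ->uring_lock instead (see
 * REQ_F_HASH_LOCKED).
 */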
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags is set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * needs to be taken, either because the wakeup was spurious or because a
 * multishot CQE was served. IOU_POLL_DONE when it's done with the request,
 * with the mask stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES
 * indicates that the multishot poll should be removed and that the result is
 * stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	int v;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* multishot armed requests need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_aux_cqe(req->ctx, *locked, req->cqe.user_data,
					mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
					IO_POLL_REF_MASK);

	return IOU_POLL_NO_ACTION;
}

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;
	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(req, locked);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(req, locked);
	} else {
		io_tw_lock(req->ctx, locked);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, locked);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(req, locked);
		else
			io_req_defer_failed(req, ret);
	}
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

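/*
 * Events that may be set in an async poll mask without having been asked
 * for; they don't count when matching a wakeup against the requested
 * events in io_poll_wake().
 */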
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already
	 * holds ownership over it, we have to tear down the request as
	 * best we can. That means immediately removing the request from
	 * its waitqueue and preventing all further accesses to the
	 * waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as poll->head is NULL'ed out, the request can be
	 * completed and freed, since io_poll_remove_entries()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

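/*
 * Callback invoked by vfs_poll() for each waitqueue head the file wants us
 * to wait on. The first head uses the already-allocated poll entry; a
 * second, different head gets a dynamically allocated "double" entry; any
 * further head fails the arming with -EINVAL.
 */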
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by virtue of running
	 * in the same task. When it's io-wq, take the ownership to prevent tw
	 * from running; in the task context we skip taking it as an
	 * optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * the poll was woken up, queue up a tw; it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

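/*
 * Arm poll-driven retry for a request that would otherwise block, e.g. a
 * recv on an empty socket. Returns IO_APOLL_OK if the request was armed,
 * IO_APOLL_READY if it's already ready (the caller should retry the issue),
 * or IO_APOLL_ABORTED if it can't be armed and should go to io-wq instead.
 */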
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

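/*
 * Look up a hashed poll request by user_data. On success the matching
 * bucket's lock is left held and returned via *out_bucket, and the caller
 * is responsible for unlocking it.
 */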
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

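/*
 * Called with the bucket lock from io_poll_find() held if a request was
 * found; takes poll ownership and unhashes the request.
 */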
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

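/*
 * Translate the user-supplied poll mask from the SQE into kernel EPOLL*
 * bits, defaulting to oneshot unless IORING_POLL_ADD_MULTI is set and to
 * edge-triggered unless IORING_POLL_ADD_LEVEL is set.
 */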
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

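/*
 * Prepare IORING_OP_POLL_REMOVE: sqe->addr holds the user_data of the poll
 * request to update or remove, sqe->off an optional replacement user_data
 * (with IORING_POLL_UPDATE_USER_DATA), sqe->poll32_events an optional
 * replacement event mask (with IORING_POLL_UPDATE_EVENTS), and sqe->len the
 * flags themselves.
 */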
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

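/*
 * Called for IORING_OP_POLL_ADD. A minimal userspace sketch of arming this
 * op via liburing (illustrative only, error handling omitted):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->len = IORING_POLL_ADD_MULTI;	// optional: multishot mode
 *	io_uring_submit(&ring);
 */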
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only update the low event mask bits, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}