xref: /openbmc/linux/io_uring/cancel.c (revision 747f7a29)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

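/* Command data for IORING_OP_ASYNC_CANCEL, stored in the io_kiocb's cmd area. */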
struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)

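/*
 * Match callback for io_wq_cancel_cb(): returns true if a queued io-wq work
 * item matches the cancelation described by @data (an io_cancel_data).
 * Requests are matched by ring, then by file (IORING_ASYNC_CANCEL_FD), by
 * user_data, or unconditionally (IORING_ASYNC_CANCEL_ANY). For ALL/ANY
 * cancelations, the per-request cancel_seq prevents matching the same
 * request twice within one cancelation attempt.
 */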
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}

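/*
 * Try to cancel matching work on the io-wq of @tctx. Returns 0 if a request
 * was canceled before it started running, -EALREADY if a match is already
 * executing, and -ENOENT if nothing matched.
 */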
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

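/*
 * Attempt cancelation for a single task context: first the io-wq queue,
 * then armed poll requests, and finally pending timeouts (unless the
 * cancelation targets a file descriptor).
 */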
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

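/*
 * Prep handler for IORING_OP_ASYNC_CANCEL: validate the SQE and pull the
 * target user_data, the cancel flags and (optionally) the fd out of it.
 */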
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}

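/*
 * Core cancelation loop: repeatedly try the issuing task's context until
 * -ENOENT (or, for single-shot cancels, until the first hit), then fall
 * back to walking every task context attached to the ring. For ALL/ANY
 * cancelations the return value is the number of requests canceled,
 * otherwise it is the result of the first match.
 */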
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

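/*
 * Issue handler for IORING_OP_ASYNC_CANCEL. If the cancelation targets a
 * file descriptor, resolve it (fixed or normal) before running the common
 * cancelation path.
 */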
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

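/* Initialize the cancelation hash table: one spinlock and list head per bucket. */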
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

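/*
 * One synchronous cancelation pass. A fixed-file target must be re-resolved
 * on every pass, since the uring_lock is dropped while waiting between
 * attempts and the file table may have changed.
 */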
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		unsigned long file_ptr;

		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
		cd->file = (struct file *) (file_ptr & FFS_MASK);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

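/*
 * Handler for the IORING_REGISTER_SYNC_CANCEL registration opcode: cancel
 * matching requests synchronously on behalf of userspace, waiting (with an
 * optional timeout from struct io_uring_sync_cancel_reg) until nothing
 * matching remains, the timeout expires, or a signal arrives.
 */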
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct fd f = { };
	DEFINE_WAIT(wait);
	int ret;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
		return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		f = fdget(sc.fd);
		if (!f.file)
			return -EBADF;
		cd.file = f.file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		if (ret != -EALREADY)
			break;

		mutex_unlock(&ctx->uring_lock);
		ret = io_run_task_work_sig();
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			break;
		}
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		mutex_lock(&ctx->uring_lock);
		if (!ret) {
			ret = -ETIME;
			break;
		}
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	fdput(f);
	return ret;
}