xref: /openbmc/linux/io_uring/cancel.c (revision 0c7df8c2)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}

	return true;
}

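/*
 * Match callback for io_wq_cancel_cb(): maps an io_wq_work item back to its
 * io_kiocb and defers to io_cancel_req_match().
 */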
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}

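/*
 * Try to cancel matching work on a single task's io-wq. Returns 0 if a
 * pending request was cancelled, -EALREADY if a match was found but is
 * already running, and -ENOENT if nothing matched (or there is no io-wq).
 */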
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

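/*
 * Attempt cancellation in stages: io-wq work first, then armed poll
 * requests, and finally timeouts (skipped when matching by file
 * descriptor, as timeouts aren't keyed by file).
 */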
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

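/*
 * Prep for IORING_OP_ASYNC_CANCEL: sqe->addr carries the user_data to
 * match, sqe->cancel_flags the IORING_ASYNC_CANCEL_* flags, sqe->fd the
 * target descriptor (CANCEL_FD), and sqe->len the target opcode (CANCEL_OP).
 */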
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}

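/*
 * Core of async cancel: retry against the given tctx until -ENOENT, then
 * fall back to walking every task attached to the ring. With CANCEL_ALL or
 * CANCEL_ANY the return value is the number of requests cancelled,
 * otherwise it's the result of the first match.
 */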
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

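/*
 * Issue handler for IORING_OP_ASYNC_CANCEL.
 *
 * Illustrative userspace sketch (not part of this file), assuming
 * liburing's prep helpers are available, cancelling all requests that were
 * submitted with a given user_data:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel64(sqe, target_user_data, IORING_ASYNC_CANCEL_ALL);
 *	io_uring_submit(&ring);
 *	// CQE res: number cancelled with CANCEL_ALL set, else 0, -ENOENT
 *	// or -EALREADY as described above __io_async_cancel().
 */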
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

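/*
 * Initialise each bucket (lock plus hash list head) of an io_hash_table.
 */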
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

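/*
 * One pass of a synchronous cancel. A fixed file must be re-resolved on
 * every call since the uring_lock is dropped between attempts, after which
 * the regular async cancel machinery is run.
 */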
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

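/*
 * Synchronous cancel, invoked via io_uring_register(2) with
 * IORING_REGISTER_SYNC_CANCEL. Unlike IORING_OP_ASYNC_CANCEL it can wait,
 * up to an optional timeout, for matched requests to complete.
 *
 * Illustrative userspace sketch (not part of this file), assuming
 * liburing's io_uring_register_sync_cancel() helper, cancelling everything
 * tied to a file descriptor and waiting at most one second:
 *
 *	struct io_uring_sync_cancel_reg reg = {
 *		.fd	= fd,
 *		.flags	= IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL,
 *		.timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *	};
 *
 *	int ret = io_uring_register_sync_cancel(&ring, &reg);
 */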
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancellation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}
345