xref: /openbmc/linux/io_uring/cancel.c (revision 7d8ca725)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

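/*
 * Command state for IORING_OP_ASYNC_CANCEL, decoded from the SQE at prep
 * time: the user_data value to match, the IORING_ASYNC_CANCEL_* flags,
 * and, for fd-based cancelation, the descriptor to match on.
 */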
struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)

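/*
 * Match callback handed to io_wq_cancel_cb(): decide whether a pending
 * io-wq work item matches the cancelation request. Requests are matched
 * by ring first, then unconditionally (CANCEL_ANY), by file (CANCEL_FD),
 * or by user_data (the default). For multi-shot matching
 * (CANCEL_ALL/CANCEL_ANY), the sequence number ensures each request is
 * only matched once per cancelation attempt.
 */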
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}

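/*
 * Attempt cancelation in one task's io-wq, mapping the io-wq result to
 * an errno: 0 if the work was canceled before it ran, -EALREADY if it is
 * already executing, -ENOENT if nothing matched.
 */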
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

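/*
 * Core cancelation path: try the io-wq first, then any armed poll
 * request, and finally timeouts. Timeouts are only searched for
 * user_data based matches, as they are not tied to a file.
 */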
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll handler
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

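/*
 * Decode and validate an IORING_OP_ASYNC_CANCEL SQE: unused fields must
 * be zero, only known CANCEL_FLAGS are accepted, and CANCEL_FD and
 * CANCEL_ANY are mutually exclusive.
 */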
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}

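/*
 * Run the cancelation. Matches are first attempted within the issuing
 * task's context; if nothing (more) is found there, fall back to the
 * slow path and walk every task context attached to the ring. With
 * CANCEL_ALL/CANCEL_ANY the return value is the number of canceled
 * requests, otherwise it's the result of the first definitive attempt.
 */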
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

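/*
 * Handler for IORING_OP_ASYNC_CANCEL. For fd-based cancelation the file
 * is resolved here, from the fixed file table or the normal fd table
 * depending on IORING_ASYNC_CANCEL_FD_FIXED, and requests are then
 * matched on the struct file pointer rather than the descriptor number.
 *
 * For reference, a minimal userspace sketch of driving this opcode,
 * assuming liburing 2.2+ (the helpers below are liburing's, not part of
 * this file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// cancel all pending requests submitted against `fd`
 *	io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
 *	io_uring_submit(&ring);
 *
 * The CQE result is then the number of canceled requests, since
 * CANCEL_ALL is set; without it, it would be 0, -ENOENT or -EALREADY.
 */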
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

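/*
 * Initialize an io_hash_table: one spinlock-protected hlist per bucket.
 * Used for the ring's cancelation hash tables that hashed poll requests
 * are tracked in.
 */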
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}
209