// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}
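/*
 * Illustrative arithmetic for the clamp above (example figures, not
 * taken from this file): on an 8-CPU machine, a ring created with 128
 * SQ entries gets an io-wq concurrency of min(128, 4 * 8) = 32, while
 * a 16-entry ring gets min(16, 32) = 16.
 */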
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_tctx_node *node;
	unsigned long index;

	/*
	 * Fault injection forcing allocation errors in the xa_store() path
	 * can lead to xa_empty() returning false, even though no actual
	 * node is stored in the xarray. Until that gets sorted out, attempt
	 * an iteration here and warn if any entries are found.
	 */
	xa_for_each(&tctx->xa, index, node) {
		WARN_ON_ONCE(1);
		break;
	}
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	return 0;
}

int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}
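/*
 * Userspace illustration (a minimal sketch, not part of this file;
 * assumes liburing and pthreads, error handling elided): with
 * IORING_SETUP_SINGLE_ISSUER only the submitter task may enter the
 * ring, so the check above fails submission from any other thread
 * with -EEXIST.
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <liburing.h>
 *
 *	static void *other_thread(void *arg)
 *	{
 *		struct io_uring *ring = arg;
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *
 *		io_uring_prep_nop(sqe);
 *		// Not the submitter task: expect -EEXIST here.
 *		printf("submit from thread: %d\n", io_uring_submit(ring));
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		struct io_uring ring;
 *		pthread_t t;
 *
 *		io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER);
 *		pthread_create(&t, NULL, other_thread, &ring);
 *		pthread_join(t, NULL);
 *		io_uring_queue_exit(&ring);
 *		return 0;
 *	}
 */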
/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end)
{
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		tctx->registered_rings[offset] = file;
		return offset;
	}
	return -EBUSY;
}

static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	file = fget(fd);
	if (!file) {
		return -EBADF;
	} else if (!io_is_uring_fops(file)) {
		fput(file);
		return -EOPNOTSUPP;
	}
	offset = io_ring_add_registered_file(tctx, file, start, end);
	if (offset < 0)
		fput(file);
	return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}
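/*
 * Userspace illustration (a minimal sketch, not part of this file;
 * raw syscall interface, error handling elided): register the ring's
 * own fd per the comment above io_ringfd_register(), letting the
 * kernel pick a free slot. The returned slot is then used in place of
 * the ring fd by passing IORING_ENTER_REGISTERED_RING to
 * io_uring_enter().
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	static int register_ring_fd(int ring_fd)
 *	{
 *		struct io_uring_rsrc_update upd;
 *
 *		memset(&upd, 0, sizeof(upd));
 *		upd.data = ring_fd;	// ->data holds the fd to register
 *		upd.offset = -1U;	// -1U: any free index
 *
 *		// Returns the number of entries processed (1 on success);
 *		// the kernel writes the chosen slot back to upd.offset.
 *		if (syscall(__NR_io_uring_register, ring_fd,
 *			    IORING_REGISTER_RING_FDS, &upd, 1) != 1)
 *			return -1;
 *		return upd.offset;
 *	}
 */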