// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

/*
 * Create an io-wq for offloading this task's requests on behalf of @ctx.
 * The hash map used to serialise hashed work is allocated once per ring
 * and shared by every io-wq created against it.
 */
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

__cold int io_uring_alloc_task_context(struct task_struct *task,
					struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_idle, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	spin_lock_init(&tctx->task_lock);
	INIT_WQ_LIST(&tctx->task_list);
	INIT_WQ_LIST(&tctx->prio_task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

/*
 * With IORING_SETUP_SINGLE_ISSUER, only one task may ever submit to the
 * ring. Record the first submitter and reject any other task with -EEXIST.
 */
static int io_register_submitter(struct io_ring_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&ctx->uring_lock);
	if (!ctx->submitter_task)
		ctx->submitter_task = get_task_struct(current);
	else if (ctx->submitter_task != current)
		ret = -EEXIST;
	mutex_unlock(&ctx->uring_lock);

	return ret;
}

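/*
 * Ensure that the calling task has an io_uring_task context and that @ctx
 * is tracked in its xarray, allocating both on first use. If @submitter is
 * true, the ring is also remembered as the task's most recently used one
 * and, for IORING_SETUP_SINGLE_ISSUER, the task is pinned as the only
 * allowed submitter.
 */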
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) && submitter) {
		ret = io_register_submitter(ctx);
		if (ret)
			return ret;
	}

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	if (submitter)
		tctx->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

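/*
 * Scan slots [start, end) for a free registered-ring entry, take a
 * reference to the io_uring file behind @fd and park it there. Returns the
 * slot index used, -EBADF/-EOPNOTSUPP for a bad or non-io_uring fd, or
 * -EBUSY if every slot in the range is already occupied.
 */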
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		file = fget(fd);
		if (!file) {
			return -EBADF;
		} else if (!io_is_uring_fops(file)) {
			fput(file);
			return -EOPNOTSUPP;
		}
		tctx->registered_rings[offset] = file;
		return offset;
	}

	return -EBUSY;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx, false);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}

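/*
 * Illustrative userspace sketch (not part of this file): one way an
 * application might use the registration path above, assuming the uapi
 * IORING_REGISTER_RING_FDS opcode and IORING_ENTER_REGISTERED_RING enter
 * flag; "ring_fd" and "sq_submitted" are hypothetical names and error
 * handling is omitted.
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Let the kernel pick a free slot by passing offset == -1U.
 *	struct io_uring_rsrc_update reg = {
 *		.offset	= -1U,
 *		.data	= (__u64)ring_fd,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_RING_FDS,
 *		&reg, 1);
 *	// On success the chosen slot index is written back into reg.offset.
 *	unsigned int slot = reg.offset;
 *
 *	// From now on, pass the slot index instead of the real fd and set
 *	// IORING_ENTER_REGISTERED_RING to skip fdget()/fdput() per enter.
 *	syscall(__NR_io_uring_enter, slot, sq_submitted, 0,
 *		IORING_ENTER_REGISTERED_RING, NULL, 0);
 */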