// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

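/*
 * Find the io_sq_data this ring will use. With IORING_SETUP_ATTACH_WQ we
 * try to share the SQPOLL task of the existing ring identified by p->wq_fd
 * (allowed only within the same thread group); if that fails with -EPERM,
 * or if attaching wasn't requested, a fresh io_sq_data is allocated and a
 * new thread is created later by io_sq_offload_create().
 */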
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying, good for io_uring_register(),
		 * but also it is relied upon by io_ring_exit_work()
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

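/*
 * Main loop of the SQPOLL kernel thread: repeatedly submit pending SQEs and
 * run iopoll for every ring attached to this io_sq_data, handle park/stop
 * requests and signals, and once sq_thread_idle has elapsed without finding
 * work, set IORING_SQ_NEED_WAKEUP and go to sleep until the application (or
 * a park/stop request) wakes the thread again.
 */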
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;

	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}

	/*
	 * Force audit context to get setup, in case we do prep side async
	 * operations that would trigger an audit call before any issue side
	 * audit has been done.
	 */
	audit_uring_entry(IORING_OP_NOP);
	audit_uring_exit(true, 0);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}

void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}

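/*
 * Everything below is driven by the io_uring_params that the application
 * passed to io_uring_setup(2). A minimal userspace sketch of requesting
 * SQPOLL (illustrative only; QUEUE_DEPTH and ring_fd are placeholder names,
 * and liburing provides equivalent helpers):
 *
 *	struct io_uring_params p = { };
 *
 *	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
 *	p.sq_thread_idle = 2000;	(idle timeout, in milliseconds)
 *	p.sq_thread_cpu = 3;		(pin the SQPOLL thread to CPU 3)
 *	ring_fd = io_uring_setup(QUEUE_DEPTH, &p);
 *
 * sq_thread_idle is converted with msecs_to_jiffies() below and defaults to
 * one second (HZ) when zero; sq_thread_cpu is honoured only with
 * IORING_SETUP_SQ_AFF and must name an online CPU.
 */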
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}