// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
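/*
 * Illustrative sketch (not part of the kernel; liburing is the authoritative
 * reference): how an application might reap one CQE under the rules above.
 * The khead/ktail/kring_mask/cqes names mirror the mmap'ed ring layout, and
 * smp_load_acquire()/smp_store_release() stand in for whatever acquire and
 * release primitives the application's toolchain provides:
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = smp_load_acquire(cq->ktail);	// pairs with kernel tail store
 *	if (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *		consume(cqe);				// load the entry before releasing head
 *		smp_store_release(cq->khead, head + 1);	// pairs with io_get_cqe
 *	}
 *
 * and, for IORING_SETUP_SQPOLL, after publishing the SQ tail:
 *
 *	smp_store_release(sq->ktail, tail);
 *	smp_mb();					// full barrier, as noted above
 *	if (READ_ONCE(*sq->kflags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(..., IORING_ENTER_SQ_WAKEUP);
 */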
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/highmem.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "cancel.h"
#include "net.h"
#include "notif.h"

#include "timeout.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			    REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH			32
#define IO_REQ_ALLOC_BATCH		8

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

enum {
	IO_EVENTFD_OP_SIGNAL_BIT,
	IO_EVENTFD_OP_FREE_BIT,
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);

struct kmem_cache *req_cachep;

static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;
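
/*
 * Illustrative admin usage of io_uring_disabled/io_uring_group (semantics
 * as documented in Documentation/admin-guide/sysctl/kernel.rst):
 *
 *	# restrict ring creation to members of gid 1234 (and CAP_SYS_ADMIN)
 *	echo 1    > /proc/sys/kernel/io_uring_disabled
 *	echo 1234 > /proc/sys/kernel/io_uring_group
 *
 *	# disable io_uring creation entirely
 *	echo 2 > /proc/sys/kernel/io_uring_disabled
 */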
#ifdef CONFIG_SYSCTL
static struct ctl_table kernel_io_uring_disabled_table[] = {
	{
		.procname	= "io_uring_disabled",
		.data		= &sysctl_io_uring_disabled,
		.maxlen		= sizeof(sysctl_io_uring_disabled),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "io_uring_group",
		.data		= &sysctl_io_uring_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{},
};
#endif

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cqes_count)
		__io_submit_flush_completions(ctx);
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
{
	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}

static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	struct io_tw_state ts = { .locked = true, };

	percpu_ref_get(&ctx->refs);
	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &ts);
	if (WARN_ON_ONCE(!ts.locked))
		return;
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	xa_init(&ctx->io_bl_xa);

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets to not overconsume memory.
	 */
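	/*
	 * Worked example (illustrative): for p->cq_entries == 4096,
	 * ilog2(4096) == 12, so hash_bits becomes 7 after the clamp
	 * below: 128 buckets, i.e. ~32 entries per bucket when full.
	 */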
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
		goto err;
	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
		goto err;
	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    0, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	INIT_LIST_HEAD(&ctx->io_buffers_cache);
	INIT_HLIST_HEAD(&ctx->io_buf_list);
	io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
			    sizeof(struct io_rsrc_node));
	io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct async_poll));
	io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct io_async_msghdr));
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	init_waitqueue_head(&ctx->poll_wq);
	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
	spin_lock_init(&ctx->completion_lock);
	spin_lock_init(&ctx->timeout_lock);
	INIT_WQ_LIST(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->io_buffers_pages);
	INIT_LIST_HEAD(&ctx->io_buffers_comp);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->ltimeout_list);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	init_llist_head(&ctx->work_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	ctx->submit_state.free_list.next = NULL;
	INIT_WQ_LIST(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
	return ctx;
err:
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	kfree(ctx->io_bl);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_put_kbuf_comp(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}

static inline void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&req->task->io_uring->inflight_tracked);
	}
}

static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!req->link))
		return NULL;

	req->flags &= ~REQ_F_ARM_LTIMEOUT;
	req->flags |= REQ_F_LINK_TIMEOUT;

	/* linked timeouts should have two refs once prep'ed */
	io_req_set_refcount(req);
	__io_req_set_refcount(req->link, 2);
	return req->link;
}

static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
		return NULL;
	return __io_prep_linked_timeout(req);
}

static noinline void __io_arm_ltimeout(struct io_kiocb *req)
{
	io_queue_linked_timeout(__io_prep_linked_timeout(req));
}

static inline void io_arm_ltimeout(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
		__io_arm_ltimeout(req);
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(req->file);

	if (req->file && (req->flags & REQ_F_ISREG)) {
		bool should_hash = def->hash_reg_file;

		/* don't serialize this request if the fs doesn't need it */
		if (should_hash && (req->file->f_flags & O_DIRECT) &&
		    (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE))
			should_hash = false;
		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}
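
/*
 * Illustrative consequence of the hashing in io_prep_async_work() above:
 * two buffered writes to the same regular file are hashed to the inode
 * via io_wq_hash_work(), so io-wq runs them serially; an O_DIRECT write
 * to a file on a filesystem that sets FMODE_DIO_PARALLEL_WRITE skips the
 * hash and may run concurrently with other writes to that inode.
 */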

void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
{
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		req->work.flags |= IO_WQ_WORK_CANCEL;

	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

static void io_eventfd_ops(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
	int ops = atomic_xchg(&ev_fd->ops, 0);

	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);

	/*
	 * IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
	 * ordering in a race, but if references are 0 we know we have to
	 * free it regardless.
	 */
	if (atomic_dec_and_test(&ev_fd->refs)) {
		eventfd_ctx_put(ev_fd->cq_ev_fd);
		kfree(ev_fd);
	}
}

static void io_eventfd_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd = NULL;

	rcu_read_lock();
	/*
	 * rcu_dereference ctx->io_ev_fd once and use it both for checking
	 * and for eventfd_signal
	 */
	ev_fd = rcu_dereference(ctx->io_ev_fd);

	/*
	 * Check again if ev_fd exists in case an io_eventfd_unregister call
	 * completed between the NULL check of ctx->io_ev_fd at the start of
	 * the function and rcu_read_lock.
	 */
	if (unlikely(!ev_fd))
		goto out;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		goto out;
	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
		goto out;

	if (likely(eventfd_signal_allowed())) {
		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
	} else {
		atomic_inc(&ev_fd->refs);
		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
			call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
		else
			atomic_dec(&ev_fd->refs);
	}

out:
	rcu_read_unlock();
}

static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
{
	bool skip;

	spin_lock(&ctx->completion_lock);

	/*
	 * Eventfd should only get triggered when at least one event has been
	 * posted. Some applications rely on the eventfd notification count
	 * only changing IFF a new CQE has been added to the CQ ring. There's
	 * no dependency on 1:1 relationship between how many times this
	 * function is called (and hence the eventfd count) and number of CQEs
	 * posted to the CQ ring.
	 */
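	/*
	 * e.g. if this function runs twice without a new CQE posted in
	 * between, cached_cq_tail still equals evfd_last_cq_tail on the
	 * second run and the signal is skipped (illustrative).
	 */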
	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);
	if (skip)
		return;

	io_eventfd_signal(ctx);
}

void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->poll_activated)
		io_poll_wq_wake(ctx);
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active) {
		spin_lock(&ctx->completion_lock);
		io_queue_deferred(ctx);
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
		io_eventfd_flush_signal(ctx);
}

static inline void __io_cq_lock(struct io_ring_ctx *ctx)
{
	if (!ctx->lockless_cq)
		spin_lock(&ctx->completion_lock);
}

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
	io_commit_cqring(ctx);
	if (!ctx->task_complete) {
		if (!ctx->lockless_cq)
			spin_unlock(&ctx->completion_lock);
		/* IOPOLL rings only need to wake up if it's also SQPOLL */
		if (!ctx->syscall_iopoll)
			io_cqring_wake(ctx);
	}
	io_commit_cqring_flush(ctx);
}

static void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_wake(ctx);
	io_commit_cqring_flush(ctx);
}

/* discard all pending overflow entries without posting them */
static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	struct io_overflow_cqe *ocqe;
	LIST_HEAD(list);

	spin_lock(&ctx->completion_lock);
	list_splice_init(&ctx->cq_overflow_list, &list);
	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
	spin_unlock(&ctx->completion_lock);

	while (!list_empty(&list)) {
		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
		list_del(&ocqe->list);
		kfree(ocqe);
	}
}

static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	size_t cqe_size = sizeof(struct io_uring_cqe);

	if (__io_cqring_events(ctx) == ctx->cq_entries)
		return;

	if (ctx->flags & IORING_SETUP_CQE32)
		cqe_size <<= 1;

	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe;
		struct io_overflow_cqe *ocqe;

		if (!io_get_cqe_overflow(ctx, &cqe, true))
			break;
		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
		memcpy(cqe, &ocqe->cqe, cqe_size);
		list_del(&ocqe->list);
		kfree(ocqe);
	}

	if (list_empty(&ctx->cq_overflow_list)) {
		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	io_cq_unlock_post(ctx);
}

static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	/* iopoll syncs against uring_lock, not completion_lock */
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx);
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_unlock(&ctx->uring_lock);
}

static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
		io_cqring_do_overflow_flush(ctx);
}

/* can be called by any task */
static void io_put_task_remote(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, 1);
	if (unlikely(atomic_read(&tctx->in_cancel)))
		wake_up(&tctx->wait);
	put_task_struct(task);
}

/* used by a task to put its own references */
static void io_put_task_local(struct task_struct *task)
{
	task->io_uring->cached_refs++;
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task)
{
	if (likely(task == current))
		io_put_task_local(task);
	else
		io_put_task_remote(task);
}

void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

	percpu_counter_add(&tctx->inflight, refill);
	refcount_add(refill, &current->usage);
	tctx->cached_refs += refill;
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

	lockdep_assert_held(&ctx->completion_lock);

	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

void io_req_cqe_overflow(struct io_kiocb *req)
{
	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				req->big_cqe.extra1, req->big_cqe.extra2);
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
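
/*
 * Worked example for io_cqe_cache_refill() below (illustrative): with
 * cq_entries == 8, cached_cq_tail == 6 and 3 CQEs still unread, off == 6,
 * queued == 3 and free == 5, so len = min(5, 8 - 6) == 2: the cache covers
 * slots 6 and 7, and the next refill wraps around to slot 0.
 */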

/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
 * fill the cq entry
 */
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
{
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;

	/*
	 * Posting into the CQ when there are pending overflowed CQEs may break
	 * ordering guarantees, which will affect links, F_MORE users and more.
	 * Force overflow the completion.
	 */
	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
		return false;

	/* userspace may cheat modifying the tail, be safe and do min */
	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
	free = ctx->cq_entries - queued;
	/* we need a contiguous range, limit based on the current array offset */
	len = min(free, ctx->cq_entries - off);
	if (!len)
		return false;

	if (ctx->flags & IORING_SETUP_CQE32) {
		off <<= 1;
		len <<= 1;
	}

	ctx->cqe_cached = &rings->cqes[off];
	ctx->cqe_sentinel = ctx->cqe_cached + len;
	return true;
}

static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			    u32 cflags)
{
	struct io_uring_cqe *cqe;

	ctx->cq_extra++;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (likely(io_get_cqe(ctx, &cqe))) {
		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);

		if (ctx->flags & IORING_SETUP_CQE32) {
			WRITE_ONCE(cqe->big_cqe[0], 0);
			WRITE_ONCE(cqe->big_cqe[1], 0);
		}
		return true;
	}
	return false;
}

static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	unsigned int i;

	lockdep_assert_held(&ctx->uring_lock);
	for (i = 0; i < state->cqes_count; i++) {
		struct io_uring_cqe *cqe = &ctx->completion_cqes[i];

		if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
			if (ctx->lockless_cq) {
				spin_lock(&ctx->completion_lock);
				io_cqring_event_overflow(ctx, cqe->user_data,
							cqe->res, cqe->flags, 0, 0);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_cqring_event_overflow(ctx, cqe->user_data,
							cqe->res, cqe->flags, 0, 0);
			}
		}
	}
	state->cqes_count = 0;
}

static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
			      bool allow_overflow)
{
	bool filled;

	io_cq_lock(ctx);
	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	if (!filled && allow_overflow)
		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);

	io_cq_unlock_post(ctx);
	return filled;
}

bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
}

/*
 * A helper for multishot requests posting additional CQEs.
 * Should only be used from a task_work including IO_URING_F_MULTISHOT.
 */
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 user_data = req->cqe.user_data;
	struct io_uring_cqe *cqe;

	if (!defer)
		return __io_post_aux_cqe(ctx, user_data, res, cflags, false);

	lockdep_assert_held(&ctx->uring_lock);

	if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->completion_cqes)) {
		__io_cq_lock(ctx);
		__io_flush_post_cqes(ctx);
		/* no need to flush - flush is deferred */
		__io_cq_unlock_post(ctx);
	}

	/*
	 * For deferred completions this is not as strict as it is otherwise,
	 * however its main job is to prevent unbounded posted completions,
	 * and in that it works just as well.
	 */
	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
		return false;

	cqe = &ctx->completion_cqes[ctx->submit_state.cqes_count++];
	cqe->user_data = user_data;
	cqe->res = res;
	cqe->flags = cflags;
	return true;
}

static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *rsrc_node = NULL;

	io_cq_lock(ctx);
	if (!(req->flags & REQ_F_CQE_SKIP)) {
		if (!io_fill_cqe_req(ctx, req))
			io_req_cqe_overflow(req);
	}

	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (req_ref_put_and_test(req)) {
		if (req->flags & IO_REQ_LINK_FLAGS) {
			if (req->flags & IO_DISARM_MASK)
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_put_kbuf_comp(req);
		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
			io_clean_op(req);
		io_put_file(req);

		rsrc_node = req->rsrc_node;
		/*
		 * Selected buffer deallocation in io_clean_op() assumes that
		 * we don't hold ->completion_lock. Clean them here to avoid
		 * deadlocks.
		 */
		io_put_task_remote(req->task);
		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
		ctx->locked_free_nr++;
	}
	io_cq_unlock_post(ctx);

	if (rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		io_put_rsrc_node(ctx, rsrc_node);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}

void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->ctx->task_complete && req->ctx->submitter_task != current) {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
		   !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		__io_req_complete_post(req, issue_flags);
	} else {
		struct io_ring_ctx *ctx = req->ctx;

		mutex_lock(&ctx->uring_lock);
		__io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
		mutex_unlock(&ctx->uring_lock);
	}
}

void io_req_defer_failed(struct io_kiocb *req, s32 res)
	__must_hold(&ctx->uring_lock)
{
	const struct io_cold_def *def = &io_cold_defs[req->opcode];

	lockdep_assert_held(&req->ctx->uring_lock);

	req_set_fail(req);
	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
	if (def->fail)
		def->fail(req);
	io_req_complete_defer(req);
}

/*
 * Don't initialise the fields below on every allocation, but do that in
 * advance and keep them valid across allocations.
 */
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	req->ctx = ctx;
	req->link = NULL;
	req->async_data = NULL;
	/* not necessary, but safer to zero */
	memset(&req->cqe, 0, sizeof(req->cqe));
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
					struct io_submit_state *state)
{
	spin_lock(&ctx->completion_lock);
	wq_list_splice(&ctx->locked_free_list, &state->free_list);
	ctx->locked_free_nr = 0;
	spin_unlock(&ctx->completion_lock);
}

/*
 * A request might get retired back into the request caches even before opcode
 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
 * Because of that, io_alloc_req() should be called only under ->uring_lock
 * and with extra caution to not get a request that is still worked on.
 */
__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	void *reqs[IO_REQ_ALLOC_BATCH];
	int ret, i;

	/*
	 * If we have more than a batch's worth of requests in our IRQ side
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
		if (!io_req_cache_empty(ctx))
			return true;
	}

	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);

	/*
	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
	 * retry single alloc to be on the safe side.
	 */
	if (unlikely(ret <= 0)) {
		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
		if (!reqs[0])
			return false;
		ret = 1;
	}

	percpu_ref_get_many(&ctx->refs, ret);
	for (i = 0; i < ret; i++) {
		struct io_kiocb *req = reqs[i];

		io_preinit_req(req, ctx);
		io_req_add_to_cache(req, ctx);
	}
	return true;
}

__cold void io_free_req(struct io_kiocb *req)
{
	/* refs were already put, restore them for io_req_task_complete() */
	req->flags &= ~REQ_F_REFCOUNT;
	/* we only want to free it, don't post CQEs */
	req->flags |= REQ_F_CQE_SKIP;
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

static void __io_req_find_next_prep(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock(&ctx->completion_lock);
	io_disarm_next(req);
	spin_unlock(&ctx->completion_lock);
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (unlikely(req->flags & IO_DISARM_MASK))
		__io_req_find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ctx)
		return;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
	if (ts->locked) {
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
		ts->locked = false;
	}
	percpu_ref_put(&ctx->refs);
}

static unsigned int handle_tw_list(struct llist_node *node,
				   struct io_ring_ctx **ctx,
				   struct io_tw_state *ts)
{
	unsigned int count = 0;

	do {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		prefetch(container_of(next, struct io_kiocb, io_task_work.node));

		if (req->ctx != *ctx) {
			ctx_flush_and_put(*ctx, ts);
			*ctx = req->ctx;
			/* if not contended, grab and improve batching */
			ts->locked = mutex_trylock(&(*ctx)->uring_lock);
			percpu_ref_get(&(*ctx)->refs);
		}
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, ts);
		node = next;
		count++;
		if (unlikely(need_resched())) {
			ctx_flush_and_put(*ctx, ts);
			*ctx = NULL;
			cond_resched();
		}
	} while (node);

	return count;
}

/**
 * io_llist_xchg - swap all entries in a lock-less list
 * @head:	the head of lock-less list to delete all entries
 * @new:	new entry as the head of the list
 *
 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
 * The order of entries returned is from the newest to the oldest added one.
 */
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
					       struct llist_node *new)
{
	return xchg(&head->first, new);
}

static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_ring_ctx *last_ctx = NULL;
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (sync && last_ctx != req->ctx) {
			if (last_ctx) {
				flush_delayed_work(&last_ctx->fallback_work);
				percpu_ref_put(&last_ctx->refs);
			}
			last_ctx = req->ctx;
			percpu_ref_get(&last_ctx->refs);
		}
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}

	if (last_ctx) {
		flush_delayed_work(&last_ctx->fallback_work);
		percpu_ref_put(&last_ctx->refs);
	}
}

void tctx_task_work(struct callback_head *cb)
{
	struct io_tw_state ts = {};
	struct io_ring_ctx *ctx = NULL;
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
						  task_work);
	struct llist_node *node;
	unsigned int count = 0;

	if (unlikely(current->flags & PF_EXITING)) {
		io_fallback_tw(tctx, true);
		return;
	}

	node = llist_del_all(&tctx->task_list);
	if (node)
		count = handle_tw_list(node, &ctx, &ts);

	ctx_flush_and_put(ctx, &ts);

	/* relaxed read is enough as only the task itself sets ->in_cancel */
	if (unlikely(atomic_read(&tctx->in_cancel)))
		io_uring_drop_tctx_refs(current);

	trace_io_uring_task_work_run(tctx, count, 1);
}
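
/*
 * Sketch of the lazy wakeup accounting in io_req_local_work_add() below
 * (illustrative): each queued item records nr_tw, the number of items
 * pending at the time it was added. A waiter that set ctx->cq_wait_nr = N
 * is only woken by the add that makes nr_tw reach N; an add without
 * IOU_F_TWQ_LAZY_WAKE forces nr_tw to INT_MAX, waking any waiter.
 */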

static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned nr_wait, nr_tw, nr_tw_prev;
	struct llist_node *first;

	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
		flags &= ~IOU_F_TWQ_LAZY_WAKE;

	first = READ_ONCE(ctx->work_llist.first);
	do {
		nr_tw_prev = 0;
		if (first) {
			struct io_kiocb *first_req = container_of(first,
							struct io_kiocb,
							io_task_work.node);
			/*
			 * Might be executed at any moment, rely on
			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
			 */
			nr_tw_prev = READ_ONCE(first_req->nr_tw);
		}
		nr_tw = nr_tw_prev + 1;
		/* Large enough to fail the nr_wait comparison below */
		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
			nr_tw = INT_MAX;

		req->nr_tw = nr_tw;
		req->io_task_work.node.next = first;
	} while (!try_cmpxchg(&ctx->work_llist.first, &first,
			      &req->io_task_work.node));

	if (!first) {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
		if (ctx->has_evfd)
			io_eventfd_signal(ctx);
	}

	nr_wait = atomic_read(&ctx->cq_wait_nr);
	/* no one is waiting */
	if (!nr_wait)
		return;
	/* either not enough or the previous add has already woken it up */
	if (nr_wait > nr_tw || nr_tw_prev >= nr_wait)
		return;
	/* pairs with set_current_state() in io_cqring_wait() */
	smp_mb__after_atomic();
	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}

static void io_req_normal_work_add(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;
	struct io_ring_ctx *ctx = req->ctx;

	/* task_work already pending, we're done */
	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
		return;

	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
		return;

	io_fallback_tw(tctx, false);
}

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		rcu_read_lock();
		io_req_local_work_add(req, flags);
		rcu_read_unlock();
	} else {
		io_req_normal_work_add(req);
	}
}

static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
	struct llist_node *node;

	node = llist_del_all(&ctx->work_llist);
	while (node) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
		io_req_normal_work_add(req);
	}
}

static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
				       int min_events)
{
	if (llist_empty(&ctx->work_llist))
		return false;
	if (events < min_events)
		return true;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
	return false;
}

static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
			       int min_events)
{
	struct llist_node *node;
	unsigned int loops = 0;
	int ret = 0;

	if (WARN_ON_ONCE(ctx->submitter_task != current))
		return -EEXIST;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
	/*
	 * llists are in reverse order, flip it back the right way before
	 * running the pending items.
	 */
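	/*
	 * e.g. items queued in order A, B, C sit on the llist as C->B->A;
	 * llist_reverse_order() restores A->B->C (illustrative).
	 */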
	node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
	while (node) {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);
		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, ts);
		ret++;
		node = next;
	}
	loops++;

	if (io_run_local_work_continue(ctx, ret, min_events))
		goto again;
	if (ts->locked) {
		io_submit_flush_completions(ctx);
		if (io_run_local_work_continue(ctx, ret, min_events))
			goto again;
	}

	trace_io_uring_local_work_run(ctx, ret, loops);
	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
					   int min_events)
{
	struct io_tw_state ts = { .locked = true, };
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	ret = __io_run_local_work(ctx, &ts, min_events);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!ts.locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
{
	struct io_tw_state ts = {};
	int ret;

	ts.locked = mutex_trylock(&ctx->uring_lock);
	ret = __io_run_local_work(ctx, &ts, min_events);
	if (ts.locked)
		mutex_unlock(&ctx->uring_lock);

	return ret;
}

static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	io_req_defer_failed(req, req->cqe.res);
}

void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		io_req_defer_failed(req, -EFAULT);
	else if (req->flags & REQ_F_FORCE_ASYNC)
		io_queue_iowq(req, ts);
	else
		io_queue_sqe(req);
}

void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	io_req_set_res(req, ret, 0);
	req->io_task_work.func = io_req_task_cancel;
	io_req_task_work_add(req);
}

void io_req_task_queue(struct io_kiocb *req)
{
	req->io_task_work.func = io_req_task_submit;
	io_req_task_work_add(req);
}

void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_batch_list(struct io_ring_ctx *ctx,
			       struct io_wq_work_node *node)
	__must_hold(&ctx->uring_lock)
{
	do {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
			if (req->flags & REQ_F_REFCOUNT) {
				node = req->comp_list.next;
				if (!req_ref_put_and_test(req))
					continue;
			}
			if ((req->flags & REQ_F_POLLED) && req->apoll) {
				struct async_poll *apoll = req->apoll;

				if (apoll->double_poll)
					kfree(apoll->double_poll);
				if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
					kfree(apoll);
				req->flags &= ~REQ_F_POLLED;
			}
			if (req->flags & IO_REQ_LINK_FLAGS)
				io_queue_next(req);
			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
				io_clean_op(req);
		}
		io_put_file(req);

		io_req_put_rsrc_locked(req, ctx);

		io_put_task(req->task);
		node = req->comp_list.next;
		io_req_add_to_cache(req, ctx);
	} while (node);
}

void __io_submit_flush_completions(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_wq_work_node *node;

	__io_cq_lock(ctx);
	/* must come first to preserve CQE ordering in failure cases */
	if (state->cqes_count)
		__io_flush_post_cqes(ctx);
	__wq_list_for_each(node, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (!(req->flags & REQ_F_CQE_SKIP) &&
		    unlikely(!io_fill_cqe_req(ctx, req))) {
			if (ctx->lockless_cq) {
				spin_lock(&ctx->completion_lock);
				io_req_cqe_overflow(req);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_req_cqe_overflow(req);
			}
		}
	}
	__io_cq_unlock_post(ctx);

	if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
		io_free_batch_list(ctx, state->compl_reqs.first);
		INIT_WQ_LIST(&state->compl_reqs);
	}
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!wq_list_empty(&ctx->iopoll_list)) {
		/* let it sleep and repeat later if can't complete a request */
		if (io_do_iopoll(ctx, true) == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	unsigned long check_cq;

	if (!io_allowed_run_tw(ctx))
		return -EEXIST;

	check_cq = READ_ONCE(ctx->check_cq);
	if (unlikely(check_cq)) {
		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
			__io_cqring_overflow_flush(ctx);
		/*
		 * Similarly do not spin if we have not informed the user of any
		 * dropped CQE.
		 */
		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
			return -EBADR;
	}
	/*
	 * Don't enter poll loop if we already have events pending.
	 * If we do, we can potentially be spinning for commands that
	 * already triggered a CQE (eg in error).
	 */
	if (io_cqring_events(ctx))
		return 0;

	do {
		int ret = 0;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (wq_list_empty(&ctx->iopoll_list) ||
		    io_task_work_pending(ctx)) {
			u32 tail = ctx->cached_cq_tail;

			(void) io_run_local_work_locked(ctx, min);

			if (task_work_pending(current) ||
			    wq_list_empty(&ctx->iopoll_list)) {
				mutex_unlock(&ctx->uring_lock);
				io_run_task_work();
				mutex_lock(&ctx->uring_lock);
			}
			/* some requests don't go through iopoll_list */
			if (tail != ctx->cached_cq_tail ||
			    wq_list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, !min);
		if (unlikely(ret < 0))
			return ret;

		if (task_sigpending(current))
			return -EINTR;
		if (need_resched())
			break;

		nr_events += ret;
	} while (nr_events < min);

	return 0;
}

void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	if (ts->locked)
		io_req_complete_defer(req);
	else
		io_req_complete_post(req, IO_URING_F_UNLOCKED);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from a io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

	/* workqueue context doesn't hold uring_lock, grab it now */
	if (unlikely(needs_lock))
		mutex_lock(&ctx->uring_lock);

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (wq_list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_queue = false;
	} else if (!ctx->poll_multi_queue) {
		struct io_kiocb *list_req;

		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
					comp_list);
		if (list_req->file != req->file)
			ctx->poll_multi_queue = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
	else
		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);

	if (unlikely(needs_lock)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in sq thread task context or in io worker task context. If
		 * the current task context is sq thread, we don't need to
		 * check whether we should wake up the sq thread.
		 */
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
}

unsigned int io_file_get_flags(struct file *file)
{
	unsigned int res = 0;

	if (S_ISREG(file_inode(file)->i_mode))
		res |= REQ_F_ISREG;
	if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
		res |= REQ_F_SUPPORT_NOWAIT;
	return res;
}

bool io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
	if (req->async_data) {
		req->flags |= REQ_F_ASYNC_DATA;
		return false;
	}
	return true;
}

int io_req_prep_async(struct io_kiocb *req)
{
	const struct io_cold_def *cdef = &io_cold_defs[req->opcode];
	const struct io_issue_def *def = &io_issue_defs[req->opcode];

	/* assign early for deferred execution for non-fixed file */
	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
		req->file = io_file_get_normal(req, req->cqe.fd);
	if (!cdef->prep_async)
		return 0;
	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (!def->manual_alloc) {
		if (io_alloc_async_data(req))
			return -EAGAIN;
	}
	return cdef->prep_async(req);
}

static u32 io_get_sequence(struct io_kiocb *req)
{
	u32 seq = req->ctx->cached_sq_head;
	struct io_kiocb *cur;

	/* need original cached_sq_head, but it was increased for each req */
	io_for_each_link(cur, req)
		seq--;
	return seq;
}
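
/*
 * Illustrative drain example: an IOSQE_IO_DRAIN request submitted as the
 * 10th SQE gets seq == 9 (io_get_sequence() walks back one per linked
 * request) and stays deferred while seq + cq_extra != cached_cq_tail,
 * i.e. until the nine earlier submissions have posted their CQEs (aux
 * CQEs are discounted via cq_extra).
 */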
static __cold void io_drain_req(struct io_kiocb *req)
	__must_hold(&ctx->uring_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_defer_entry *de;
	int ret;
	u32 seq = io_get_sequence(req);

	/* still need to defer if there is a pending req in the defer list */
	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
queue:
		ctx->drain_active = false;
		io_req_task_queue(req);
		return;
	}
	spin_unlock(&ctx->completion_lock);

	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
		io_req_defer_failed(req, ret);
		return;
	}

	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
		kfree(de);
		goto queue;
	}

	trace_io_uring_defer(req);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock(&ctx->completion_lock);
}

static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
			   unsigned int issue_flags)
{
	if (req->file || !def->needs_file)
		return true;

	if (req->flags & REQ_F_FIXED_FILE)
		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
	else
		req->file = io_file_get_normal(req, req->cqe.fd);

	return !!req->file;
}

static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	const struct cred *creds = NULL;
	int ret;

	if (unlikely(!io_assign_file(req, def, issue_flags)))
		return -EBADF;

	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
		creds = override_creds(req->creds);

	if (!def->audit_skip)
		audit_uring_entry(req->opcode);

	ret = def->issue(req, issue_flags);

	if (!def->audit_skip)
		audit_uring_exit(!ret, ret);

	if (creds)
		revert_creds(creds);

	if (ret == IOU_OK) {
		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
			io_req_complete_defer(req);
		else
			io_req_complete_post(req, issue_flags);

		return 0;
	}

	if (ret != IOU_ISSUE_SKIP_COMPLETE)
		return ret;

	/* If the op doesn't have a file, we're not polling for it */
	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
		io_iopoll_req_issued(req, issue_flags);

	return 0;
}

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
				 IO_URING_F_COMPLETE_DEFER);
}

struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (req_ref_put_and_test(req)) {
		if (req->flags & IO_REQ_LINK_FLAGS)
			nxt = io_req_find_next(req);
		io_free_req(req);
	}
	return nxt ? &nxt->work : NULL;
}

void io_wq_submit_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
	bool needs_poll = false;
	int ret = 0, err = -ECANCELED;

	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
	if (!(req->flags & REQ_F_REFCOUNT))
		__io_req_set_refcount(req, 2);
	else
		req_ref_get(req);

	io_arm_ltimeout(req);

	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
	if (work->flags & IO_WQ_WORK_CANCEL) {
fail:
		io_req_task_queue_fail(req, err);
		return;
	}
	if (!io_assign_file(req, def, issue_flags)) {
		err = -EBADF;
		work->flags |= IO_WQ_WORK_CANCEL;
		goto fail;
	}

	if (req->flags & REQ_F_FORCE_ASYNC) {
		bool opcode_poll = def->pollin || def->pollout;

		if (opcode_poll && file_can_poll(req->file)) {
			needs_poll = true;
			issue_flags |= IO_URING_F_NONBLOCK;
		}
	}

	do {
		ret = io_issue_sqe(req, issue_flags);
		if (ret != -EAGAIN)
			break;

		/*
		 * If REQ_F_NOWAIT is set, then don't wait or retry with
		 * poll. -EAGAIN is final for that case.
		 */
		if (req->flags & REQ_F_NOWAIT)
			break;

		/*
		 * We can get EAGAIN for iopolled IO even though we're
		 * forcing a sync submission from here, since we can't
		 * wait for request slots on the block side.
		 */
		if (!needs_poll) {
			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
				break;
			if (io_wq_worker_stopped())
				break;
			cond_resched();
			continue;
		}

		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
			return;
		/* aborted or ready, in either case retry blocking */
		needs_poll = false;
		issue_flags &= ~IO_URING_F_NONBLOCK;
	} while (1);

	/* avoid locking problems by failing it from a clean context */
	if (ret < 0)
		io_req_task_queue_fail(req, ret);
}

inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
				      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_fixed_file *slot;
	struct file *file = NULL;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
		goto out;
	fd = array_index_nospec(fd, ctx->nr_user_files);
	slot = io_fixed_file_slot(&ctx->file_table, fd);
	file = io_slot_file(slot);
	req->flags |= io_slot_flags(slot);
	io_req_set_rsrc_node(req, ctx, 0);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

struct file *io_file_get_normal(struct io_kiocb *req, int fd)
{
	struct file *file = fget(fd);

	trace_io_uring_file_get(req, fd);

	/* we don't allow fixed io_uring files */
	if (file && io_is_uring_fops(file))
		io_req_track_inflight(req);
	return file;
}

static void io_queue_async(struct io_kiocb *req, int ret)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_kiocb *linked_timeout;

	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
		io_req_defer_failed(req, ret);
		return;
	}

	linked_timeout = io_prep_linked_timeout(req);

	switch (io_arm_poll_handler(req, 0)) {
	case IO_APOLL_READY:
		io_kbuf_recycle(req, 0);
io_req_task_queue(req); 2022 break; 2023 case IO_APOLL_ABORTED: 2024 io_kbuf_recycle(req, 0); 2025 io_queue_iowq(req, NULL); 2026 break; 2027 case IO_APOLL_OK: 2028 break; 2029 } 2030 2031 if (linked_timeout) 2032 io_queue_linked_timeout(linked_timeout); 2033 } 2034 2035 static inline void io_queue_sqe(struct io_kiocb *req) 2036 __must_hold(&req->ctx->uring_lock) 2037 { 2038 int ret; 2039 2040 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); 2041 2042 /* 2043 * We async punt it if the file wasn't marked NOWAIT, or if the file 2044 * doesn't support non-blocking read/write attempts 2045 */ 2046 if (likely(!ret)) 2047 io_arm_ltimeout(req); 2048 else 2049 io_queue_async(req, ret); 2050 } 2051 2052 static void io_queue_sqe_fallback(struct io_kiocb *req) 2053 __must_hold(&req->ctx->uring_lock) 2054 { 2055 if (unlikely(req->flags & REQ_F_FAIL)) { 2056 /* 2057 * We don't submit, fail them all, for that replace hardlinks 2058 * with normal links. Extra REQ_F_LINK is tolerated. 2059 */ 2060 req->flags &= ~REQ_F_HARDLINK; 2061 req->flags |= REQ_F_LINK; 2062 io_req_defer_failed(req, req->cqe.res); 2063 } else { 2064 int ret = io_req_prep_async(req); 2065 2066 if (unlikely(ret)) { 2067 io_req_defer_failed(req, ret); 2068 return; 2069 } 2070 2071 if (unlikely(req->ctx->drain_active)) 2072 io_drain_req(req); 2073 else 2074 io_queue_iowq(req, NULL); 2075 } 2076 } 2077 2078 /* 2079 * Check SQE restrictions (opcode and flags). 2080 * 2081 * Returns 'true' if SQE is allowed, 'false' otherwise. 2082 */ 2083 static inline bool io_check_restriction(struct io_ring_ctx *ctx, 2084 struct io_kiocb *req, 2085 unsigned int sqe_flags) 2086 { 2087 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) 2088 return false; 2089 2090 if ((sqe_flags & ctx->restrictions.sqe_flags_required) != 2091 ctx->restrictions.sqe_flags_required) 2092 return false; 2093 2094 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | 2095 ctx->restrictions.sqe_flags_required)) 2096 return false; 2097 2098 return true; 2099 } 2100 2101 static void io_init_req_drain(struct io_kiocb *req) 2102 { 2103 struct io_ring_ctx *ctx = req->ctx; 2104 struct io_kiocb *head = ctx->submit_state.link.head; 2105 2106 ctx->drain_active = true; 2107 if (head) { 2108 /* 2109 * If we need to drain a request in the middle of a link, drain 2110 * the head request and the next request/link after the current 2111 * link. Considering sequential execution of links, 2112 * REQ_F_IO_DRAIN will be maintained for every request of our 2113 * link. 
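 * E.g. for a link A -> B -> C where B carries IOSQE_IO_DRAIN, the
 * head A is marked for drain below and ctx->drain_next makes the
 * first request after the link drain too.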
2114 */ 2115 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; 2116 ctx->drain_next = true; 2117 } 2118 } 2119 2120 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, 2121 const struct io_uring_sqe *sqe) 2122 __must_hold(&ctx->uring_lock) 2123 { 2124 const struct io_issue_def *def; 2125 unsigned int sqe_flags; 2126 int personality; 2127 u8 opcode; 2128 2129 /* req is partially pre-initialised, see io_preinit_req() */ 2130 req->opcode = opcode = READ_ONCE(sqe->opcode); 2131 /* same numerical values with corresponding REQ_F_*, safe to copy */ 2132 req->flags = sqe_flags = READ_ONCE(sqe->flags); 2133 req->cqe.user_data = READ_ONCE(sqe->user_data); 2134 req->file = NULL; 2135 req->rsrc_node = NULL; 2136 req->task = current; 2137 2138 if (unlikely(opcode >= IORING_OP_LAST)) { 2139 req->opcode = 0; 2140 return -EINVAL; 2141 } 2142 def = &io_issue_defs[opcode]; 2143 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { 2144 /* enforce forwards compatibility on users */ 2145 if (sqe_flags & ~SQE_VALID_FLAGS) 2146 return -EINVAL; 2147 if (sqe_flags & IOSQE_BUFFER_SELECT) { 2148 if (!def->buffer_select) 2149 return -EOPNOTSUPP; 2150 req->buf_index = READ_ONCE(sqe->buf_group); 2151 } 2152 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS) 2153 ctx->drain_disabled = true; 2154 if (sqe_flags & IOSQE_IO_DRAIN) { 2155 if (ctx->drain_disabled) 2156 return -EOPNOTSUPP; 2157 io_init_req_drain(req); 2158 } 2159 } 2160 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { 2161 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) 2162 return -EACCES; 2163 /* knock it to the slow queue path, will be drained there */ 2164 if (ctx->drain_active) 2165 req->flags |= REQ_F_FORCE_ASYNC; 2166 /* if there is no link, we're at "next" request and need to drain */ 2167 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { 2168 ctx->drain_next = false; 2169 ctx->drain_active = true; 2170 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; 2171 } 2172 } 2173 2174 if (!def->ioprio && sqe->ioprio) 2175 return -EINVAL; 2176 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL)) 2177 return -EINVAL; 2178 2179 if (def->needs_file) { 2180 struct io_submit_state *state = &ctx->submit_state; 2181 2182 req->cqe.fd = READ_ONCE(sqe->fd); 2183 2184 /* 2185 * Plug now if we have more than 2 IO left after this, and the 2186 * target is potentially a read/write to block based storage. 2187 */ 2188 if (state->need_plug && def->plug) { 2189 state->plug_started = true; 2190 state->need_plug = false; 2191 blk_start_plug_nr_ios(&state->plug, state->submit_nr); 2192 } 2193 } 2194 2195 personality = READ_ONCE(sqe->personality); 2196 if (personality) { 2197 int ret; 2198 2199 req->creds = xa_load(&ctx->personalities, personality); 2200 if (!req->creds) 2201 return -EINVAL; 2202 get_cred(req->creds); 2203 ret = security_uring_override_creds(req->creds); 2204 if (ret) { 2205 put_cred(req->creds); 2206 return ret; 2207 } 2208 req->flags |= REQ_F_CREDS; 2209 } 2210 2211 return def->prep(req, sqe); 2212 } 2213 2214 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, 2215 struct io_kiocb *req, int ret) 2216 { 2217 struct io_ring_ctx *ctx = req->ctx; 2218 struct io_submit_link *link = &ctx->submit_state.link; 2219 struct io_kiocb *head = link->head; 2220 2221 trace_io_uring_req_failed(sqe, req, ret); 2222 2223 /* 2224 * Avoid breaking links in the middle as it renders links with SQPOLL 2225 * unusable. 
Instead of failing eagerly, continue assembling the link if 2226 * applicable and mark the head with REQ_F_FAIL. The link flushing code 2227 * should find the flag and handle the rest. 2228 */ 2229 req_fail_link_node(req, ret); 2230 if (head && !(head->flags & REQ_F_FAIL)) 2231 req_fail_link_node(head, -ECANCELED); 2232 2233 if (!(req->flags & IO_REQ_LINK_FLAGS)) { 2234 if (head) { 2235 link->last->link = req; 2236 link->head = NULL; 2237 req = head; 2238 } 2239 io_queue_sqe_fallback(req); 2240 return ret; 2241 } 2242 2243 if (head) 2244 link->last->link = req; 2245 else 2246 link->head = req; 2247 link->last = req; 2248 return 0; 2249 } 2250 2251 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, 2252 const struct io_uring_sqe *sqe) 2253 __must_hold(&ctx->uring_lock) 2254 { 2255 struct io_submit_link *link = &ctx->submit_state.link; 2256 int ret; 2257 2258 ret = io_init_req(ctx, req, sqe); 2259 if (unlikely(ret)) 2260 return io_submit_fail_init(sqe, req, ret); 2261 2262 trace_io_uring_submit_req(req); 2263 2264 /* 2265 * If we already have a head request, queue this one for async 2266 * submittal once the head completes. If we don't have a head but 2267 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be 2268 * submitted sync once the chain is complete. If none of those 2269 * conditions are true (normal request), then just queue it. 2270 */ 2271 if (unlikely(link->head)) { 2272 ret = io_req_prep_async(req); 2273 if (unlikely(ret)) 2274 return io_submit_fail_init(sqe, req, ret); 2275 2276 trace_io_uring_link(req, link->head); 2277 link->last->link = req; 2278 link->last = req; 2279 2280 if (req->flags & IO_REQ_LINK_FLAGS) 2281 return 0; 2282 /* last request of the link, flush it */ 2283 req = link->head; 2284 link->head = NULL; 2285 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) 2286 goto fallback; 2287 2288 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | 2289 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { 2290 if (req->flags & IO_REQ_LINK_FLAGS) { 2291 link->head = req; 2292 link->last = req; 2293 } else { 2294 fallback: 2295 io_queue_sqe_fallback(req); 2296 } 2297 return 0; 2298 } 2299 2300 io_queue_sqe(req); 2301 return 0; 2302 } 2303 2304 /* 2305 * Batched submission is done, ensure local IO is flushed out. 2306 */ 2307 static void io_submit_state_end(struct io_ring_ctx *ctx) 2308 { 2309 struct io_submit_state *state = &ctx->submit_state; 2310 2311 if (unlikely(state->link.head)) 2312 io_queue_sqe_fallback(state->link.head); 2313 /* flush only after queuing links as they can generate completions */ 2314 io_submit_flush_completions(ctx); 2315 if (state->plug_started) 2316 blk_finish_plug(&state->plug); 2317 } 2318 2319 /* 2320 * Start submission side cache. 2321 */ 2322 static void io_submit_state_start(struct io_submit_state *state, 2323 unsigned int max_ios) 2324 { 2325 state->plug_started = false; 2326 state->need_plug = max_ios > 2; 2327 state->submit_nr = max_ios; 2328 /* set only head, no need to init link_last in advance */ 2329 state->link.head = NULL; 2330 } 2331 2332 static void io_commit_sqring(struct io_ring_ctx *ctx) 2333 { 2334 struct io_rings *rings = ctx->rings; 2335 2336 /* 2337 * Ensure any loads from the SQEs are done at this point, 2338 * since once we write the new head, the application could 2339 * write new data to them. 2340 */ 2341 smp_store_release(&rings->sq.head, ctx->cached_sq_head); 2342 } 2343 2344 /* 2345 * Fetch an sqe, if one is available. 
Note this returns a pointer to memory 2346 * that is mapped by userspace. This means that care needs to be taken to 2347 * ensure that reads are stable, as we cannot rely on userspace always 2348 * being a good citizen. If members of the sqe are validated and then later 2349 * used, it's important that those reads are done through READ_ONCE() to 2350 * prevent a re-load down the line. 2351 */ 2352 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) 2353 { 2354 unsigned mask = ctx->sq_entries - 1; 2355 unsigned head = ctx->cached_sq_head++ & mask; 2356 2357 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) { 2358 head = READ_ONCE(ctx->sq_array[head]); 2359 if (unlikely(head >= ctx->sq_entries)) { 2360 /* drop invalid entries */ 2361 spin_lock(&ctx->completion_lock); 2362 ctx->cq_extra--; 2363 spin_unlock(&ctx->completion_lock); 2364 WRITE_ONCE(ctx->rings->sq_dropped, 2365 READ_ONCE(ctx->rings->sq_dropped) + 1); 2366 return false; 2367 } 2368 } 2369 2370 /* 2371 * The cached sq head (or cq tail) serves two purposes: 2372 * 2373 * 1) allows us to batch the cost of the user visible 2374 * head updates. 2375 * 2) allows the kernel side to track the head on its own, even 2376 * though the application is the one updating it. 2377 */ 2378 2379 /* double the index for 128-byte SQEs, the sqe array is twice as long */ 2380 if (ctx->flags & IORING_SETUP_SQE128) 2381 head <<= 1; 2382 *sqe = &ctx->sq_sqes[head]; 2383 return true; 2384 } 2385 2386 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) 2387 __must_hold(&ctx->uring_lock) 2388 { 2389 unsigned int entries = io_sqring_entries(ctx); 2390 unsigned int left; 2391 int ret; 2392 2393 if (unlikely(!entries)) 2394 return 0; 2395 /* make sure SQ entry isn't read before tail */ 2396 ret = left = min(nr, entries); 2397 io_get_task_refs(left); 2398 io_submit_state_start(&ctx->submit_state, left); 2399 2400 do { 2401 const struct io_uring_sqe *sqe; 2402 struct io_kiocb *req; 2403 2404 if (unlikely(!io_alloc_req(ctx, &req))) 2405 break; 2406 if (unlikely(!io_get_sqe(ctx, &sqe))) { 2407 io_req_add_to_cache(req, ctx); 2408 break; 2409 } 2410 2411 /* 2412 * Continue submitting even for sqe failure if the 2413 * ring was setup with IORING_SETUP_SUBMIT_ALL 2414 */ 2415 if (unlikely(io_submit_sqe(ctx, req, sqe)) && 2416 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { 2417 left--; 2418 break; 2419 } 2420 } while (--left); 2421 2422 if (unlikely(left)) { 2423 ret -= left; 2424 /* try again if it submitted nothing and can't allocate a req */ 2425 if (!ret && io_req_cache_empty(ctx)) 2426 ret = -EAGAIN; 2427 current->io_uring->cached_refs += left; 2428 } 2429 2430 io_submit_state_end(ctx); 2431 /* Commit SQ ring head once we've consumed and submitted all SQEs */ 2432 io_commit_sqring(ctx); 2433 return ret; 2434 } 2435 2436 struct io_wait_queue { 2437 struct wait_queue_entry wq; 2438 struct io_ring_ctx *ctx; 2439 unsigned cq_tail; 2440 unsigned nr_timeouts; 2441 ktime_t timeout; 2442 }; 2443 2444 static inline bool io_has_work(struct io_ring_ctx *ctx) 2445 { 2446 return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) || 2447 !llist_empty(&ctx->work_llist); 2448 } 2449 2450 static inline bool io_should_wake(struct io_wait_queue *iowq) 2451 { 2452 struct io_ring_ctx *ctx = iowq->ctx; 2453 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail; 2454 2455 /* 2456 * Wake up if we have enough events, or if a timeout occurred since we 2457 * started waiting. For timeouts, we always want to return to userspace, 2458 * regardless of event count.
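 *
 * The tail/target difference is evaluated as a signed int, so the
 * comparison below stays correct when cq.tail wraps around: dist >= 0
 * simply means the requested number of completions has been posted.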
2459 */ 2460 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; 2461 } 2462 2463 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, 2464 int wake_flags, void *key) 2465 { 2466 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); 2467 2468 /* 2469 * Cannot safely flush overflowed CQEs from here, ensure we wake up 2470 * the task, and the next invocation will do it. 2471 */ 2472 if (io_should_wake(iowq) || io_has_work(iowq->ctx)) 2473 return autoremove_wake_function(curr, mode, wake_flags, key); 2474 return -1; 2475 } 2476 2477 int io_run_task_work_sig(struct io_ring_ctx *ctx) 2478 { 2479 if (!llist_empty(&ctx->work_llist)) { 2480 __set_current_state(TASK_RUNNING); 2481 if (io_run_local_work(ctx, INT_MAX) > 0) 2482 return 0; 2483 } 2484 if (io_run_task_work() > 0) 2485 return 0; 2486 if (task_sigpending(current)) 2487 return -EINTR; 2488 return 0; 2489 } 2490 2491 static bool current_pending_io(void) 2492 { 2493 struct io_uring_task *tctx = current->io_uring; 2494 2495 if (!tctx) 2496 return false; 2497 return percpu_counter_read_positive(&tctx->inflight); 2498 } 2499 2500 /* when returns >0, the caller should retry */ 2501 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, 2502 struct io_wait_queue *iowq) 2503 { 2504 int io_wait, ret; 2505 2506 if (unlikely(READ_ONCE(ctx->check_cq))) 2507 return 1; 2508 if (unlikely(!llist_empty(&ctx->work_llist))) 2509 return 1; 2510 if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) 2511 return 1; 2512 if (unlikely(task_sigpending(current))) 2513 return -EINTR; 2514 if (unlikely(io_should_wake(iowq))) 2515 return 0; 2516 2517 /* 2518 * Mark us as being in io_wait if we have pending requests, so cpufreq 2519 * can take into account that the task is waiting for IO - turns out 2520 * to be important for low QD IO. 2521 */ 2522 io_wait = current->in_iowait; 2523 if (current_pending_io()) 2524 current->in_iowait = 1; 2525 ret = 0; 2526 if (iowq->timeout == KTIME_MAX) 2527 schedule(); 2528 else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS)) 2529 ret = -ETIME; 2530 current->in_iowait = io_wait; 2531 return ret; 2532 } 2533 2534 /* 2535 * Wait until events become available, if we don't already have some. The 2536 * application must reap them itself, as they reside on the shared cq ring. 
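 *
 * A rough sketch of the userspace side of this (in liburing terms, not
 * part of this file): io_uring_submit() pushes SQEs, io_uring_wait_cqe()
 * ends up blocking in here until a completion is posted, and
 * io_uring_cqe_seen() then advances the CQ head.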
2537 */ 2538 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, 2539 const sigset_t __user *sig, size_t sigsz, 2540 struct __kernel_timespec __user *uts) 2541 { 2542 struct io_wait_queue iowq; 2543 struct io_rings *rings = ctx->rings; 2544 int ret; 2545 2546 if (!io_allowed_run_tw(ctx)) 2547 return -EEXIST; 2548 if (!llist_empty(&ctx->work_llist)) 2549 io_run_local_work(ctx, min_events); 2550 io_run_task_work(); 2551 io_cqring_overflow_flush(ctx); 2552 /* if user messes with these they will just get an early return */ 2553 if (__io_cqring_events_user(ctx) >= min_events) 2554 return 0; 2555 2556 if (sig) { 2557 #ifdef CONFIG_COMPAT 2558 if (in_compat_syscall()) 2559 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, 2560 sigsz); 2561 else 2562 #endif 2563 ret = set_user_sigmask(sig, sigsz); 2564 2565 if (ret) 2566 return ret; 2567 } 2568 2569 init_waitqueue_func_entry(&iowq.wq, io_wake_function); 2570 iowq.wq.private = current; 2571 INIT_LIST_HEAD(&iowq.wq.entry); 2572 iowq.ctx = ctx; 2573 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); 2574 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; 2575 iowq.timeout = KTIME_MAX; 2576 2577 if (uts) { 2578 struct timespec64 ts; 2579 2580 if (get_timespec64(&ts, uts)) 2581 return -EFAULT; 2582 iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); 2583 } 2584 2585 trace_io_uring_cqring_wait(ctx, min_events); 2586 do { 2587 int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail); 2588 unsigned long check_cq; 2589 2590 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 2591 atomic_set(&ctx->cq_wait_nr, nr_wait); 2592 set_current_state(TASK_INTERRUPTIBLE); 2593 } else { 2594 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, 2595 TASK_INTERRUPTIBLE); 2596 } 2597 2598 ret = io_cqring_wait_schedule(ctx, &iowq); 2599 __set_current_state(TASK_RUNNING); 2600 atomic_set(&ctx->cq_wait_nr, 0); 2601 2602 /* 2603 * Run task_work after scheduling and before io_should_wake(). 2604 * If we got woken because of task_work being processed, run it 2605 * now rather than let the caller do another wait loop. 2606 */ 2607 io_run_task_work(); 2608 if (!llist_empty(&ctx->work_llist)) 2609 io_run_local_work(ctx, nr_wait); 2610 2611 /* 2612 * Non-local task_work will be run on exit to userspace, but 2613 * if we're using DEFER_TASKRUN, then we could have waited 2614 * with a timeout for a number of requests. If the timeout 2615 * hits, we could have some requests ready to process. Ensure 2616 * this break is _after_ we have run task_work, to avoid 2617 * deferring running potentially pending requests until the 2618 * next time we wait for events. 2619 */ 2620 if (ret < 0) 2621 break; 2622 2623 check_cq = READ_ONCE(ctx->check_cq); 2624 if (unlikely(check_cq)) { 2625 /* let the caller flush overflows, retry */ 2626 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) 2627 io_cqring_do_overflow_flush(ctx); 2628 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) { 2629 ret = -EBADR; 2630 break; 2631 } 2632 } 2633 2634 if (io_should_wake(&iowq)) { 2635 ret = 0; 2636 break; 2637 } 2638 cond_resched(); 2639 } while (1); 2640 2641 if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) 2642 finish_wait(&ctx->cq_wait, &iowq.wq); 2643 restore_saved_sigmask_unless(ret == -EINTR); 2644 2645 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? 
ret : 0; 2646 } 2647 2648 void io_mem_free(void *ptr) 2649 { 2650 if (!ptr) 2651 return; 2652 2653 folio_put(virt_to_folio(ptr)); 2654 } 2655 2656 static void io_pages_free(struct page ***pages, int npages) 2657 { 2658 struct page **page_array; 2659 int i; 2660 2661 if (!pages) 2662 return; 2663 2664 page_array = *pages; 2665 if (!page_array) 2666 return; 2667 2668 for (i = 0; i < npages; i++) 2669 unpin_user_page(page_array[i]); 2670 kvfree(page_array); 2671 *pages = NULL; 2672 } 2673 2674 static void *__io_uaddr_map(struct page ***pages, unsigned short *npages, 2675 unsigned long uaddr, size_t size) 2676 { 2677 struct page **page_array; 2678 unsigned int nr_pages; 2679 void *page_addr; 2680 int ret, i; 2681 2682 *npages = 0; 2683 2684 if (uaddr & (PAGE_SIZE - 1) || !size) 2685 return ERR_PTR(-EINVAL); 2686 2687 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2688 if (nr_pages > USHRT_MAX) 2689 return ERR_PTR(-EINVAL); 2690 page_array = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); 2691 if (!page_array) 2692 return ERR_PTR(-ENOMEM); 2693 2694 ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, 2695 page_array); 2696 if (ret != nr_pages) { 2697 err: 2698 io_pages_free(&page_array, ret > 0 ? ret : 0); 2699 return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT); 2700 } 2701 2702 page_addr = page_address(page_array[0]); 2703 for (i = 0; i < nr_pages; i++) { 2704 ret = -EINVAL; 2705 2706 /* 2707 * Can't support mapping user allocated ring memory on 32-bit 2708 * archs where it could potentially reside in highmem. Just 2709 * fail those with -EINVAL, just like we did on kernels that 2710 * didn't support this feature. 2711 */ 2712 if (PageHighMem(page_array[i])) 2713 goto err; 2714 2715 /* 2716 * No support for discontig pages for now, should either be a 2717 * single normal page, or a huge page. Later on we can add 2718 * support for remapping discontig pages, for now we will 2719 * just fail them with EINVAL. 
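 *
 * The loop below enforces that by requiring each pinned page's kernel
 * address to directly follow the previous one.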
2720 */ 2721 if (page_address(page_array[i]) != page_addr) 2722 goto err; 2723 page_addr += PAGE_SIZE; 2724 } 2725 2726 *pages = page_array; 2727 *npages = nr_pages; 2728 return page_to_virt(page_array[0]); 2729 } 2730 2731 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, 2732 size_t size) 2733 { 2734 return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr, 2735 size); 2736 } 2737 2738 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, 2739 size_t size) 2740 { 2741 return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr, 2742 size); 2743 } 2744 2745 static void io_rings_free(struct io_ring_ctx *ctx) 2746 { 2747 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) { 2748 io_mem_free(ctx->rings); 2749 io_mem_free(ctx->sq_sqes); 2750 ctx->rings = NULL; 2751 ctx->sq_sqes = NULL; 2752 } else { 2753 io_pages_free(&ctx->ring_pages, ctx->n_ring_pages); 2754 ctx->n_ring_pages = 0; 2755 io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages); 2756 ctx->n_sqe_pages = 0; 2757 } 2758 } 2759 2760 void *io_mem_alloc(size_t size) 2761 { 2762 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; 2763 void *ret; 2764 2765 ret = (void *) __get_free_pages(gfp, get_order(size)); 2766 if (ret) 2767 return ret; 2768 return ERR_PTR(-ENOMEM); 2769 } 2770 2771 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, 2772 unsigned int cq_entries, size_t *sq_offset) 2773 { 2774 struct io_rings *rings; 2775 size_t off, sq_array_size; 2776 2777 off = struct_size(rings, cqes, cq_entries); 2778 if (off == SIZE_MAX) 2779 return SIZE_MAX; 2780 if (ctx->flags & IORING_SETUP_CQE32) { 2781 if (check_shl_overflow(off, 1, &off)) 2782 return SIZE_MAX; 2783 } 2784 2785 #ifdef CONFIG_SMP 2786 off = ALIGN(off, SMP_CACHE_BYTES); 2787 if (off == 0) 2788 return SIZE_MAX; 2789 #endif 2790 2791 if (ctx->flags & IORING_SETUP_NO_SQARRAY) { 2792 if (sq_offset) 2793 *sq_offset = SIZE_MAX; 2794 return off; 2795 } 2796 2797 if (sq_offset) 2798 *sq_offset = off; 2799 2800 sq_array_size = array_size(sizeof(u32), sq_entries); 2801 if (sq_array_size == SIZE_MAX) 2802 return SIZE_MAX; 2803 2804 if (check_add_overflow(off, sq_array_size, &off)) 2805 return SIZE_MAX; 2806 2807 return off; 2808 } 2809 2810 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, 2811 unsigned int eventfd_async) 2812 { 2813 struct io_ev_fd *ev_fd; 2814 __s32 __user *fds = arg; 2815 int fd; 2816 2817 ev_fd = rcu_dereference_protected(ctx->io_ev_fd, 2818 lockdep_is_held(&ctx->uring_lock)); 2819 if (ev_fd) 2820 return -EBUSY; 2821 2822 if (copy_from_user(&fd, fds, sizeof(*fds))) 2823 return -EFAULT; 2824 2825 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL); 2826 if (!ev_fd) 2827 return -ENOMEM; 2828 2829 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd); 2830 if (IS_ERR(ev_fd->cq_ev_fd)) { 2831 int ret = PTR_ERR(ev_fd->cq_ev_fd); 2832 kfree(ev_fd); 2833 return ret; 2834 } 2835 2836 spin_lock(&ctx->completion_lock); 2837 ctx->evfd_last_cq_tail = ctx->cached_cq_tail; 2838 spin_unlock(&ctx->completion_lock); 2839 2840 ev_fd->eventfd_async = eventfd_async; 2841 ctx->has_evfd = true; 2842 rcu_assign_pointer(ctx->io_ev_fd, ev_fd); 2843 atomic_set(&ev_fd->refs, 1); 2844 atomic_set(&ev_fd->ops, 0); 2845 return 0; 2846 } 2847 2848 static int io_eventfd_unregister(struct io_ring_ctx *ctx) 2849 { 2850 struct io_ev_fd *ev_fd; 2851 2852 ev_fd = rcu_dereference_protected(ctx->io_ev_fd, 2853 lockdep_is_held(&ctx->uring_lock)); 2854 if (ev_fd) { 2855 ctx->has_evfd = false; 2856 
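/*
 * Unpublish the eventfd; if no op is in flight the final free happens
 * after an RCU grace period, otherwise the pending op sees
 * IO_EVENTFD_OP_FREE_BIT and does the final free itself.
 */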
rcu_assign_pointer(ctx->io_ev_fd, NULL); 2857 if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops)) 2858 call_rcu(&ev_fd->rcu, io_eventfd_ops); 2859 return 0; 2860 } 2861 2862 return -ENXIO; 2863 } 2864 2865 static void io_req_caches_free(struct io_ring_ctx *ctx) 2866 { 2867 struct io_kiocb *req; 2868 int nr = 0; 2869 2870 mutex_lock(&ctx->uring_lock); 2871 io_flush_cached_locked_reqs(ctx, &ctx->submit_state); 2872 2873 while (!io_req_cache_empty(ctx)) { 2874 req = io_extract_req(ctx); 2875 kmem_cache_free(req_cachep, req); 2876 nr++; 2877 } 2878 if (nr) 2879 percpu_ref_put_many(&ctx->refs, nr); 2880 mutex_unlock(&ctx->uring_lock); 2881 } 2882 2883 static void io_rsrc_node_cache_free(struct io_cache_entry *entry) 2884 { 2885 kfree(container_of(entry, struct io_rsrc_node, cache)); 2886 } 2887 2888 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) 2889 { 2890 io_sq_thread_finish(ctx); 2891 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ 2892 if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list))) 2893 return; 2894 2895 mutex_lock(&ctx->uring_lock); 2896 if (ctx->buf_data) 2897 __io_sqe_buffers_unregister(ctx); 2898 if (ctx->file_data) 2899 __io_sqe_files_unregister(ctx); 2900 io_cqring_overflow_kill(ctx); 2901 io_eventfd_unregister(ctx); 2902 io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free); 2903 io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); 2904 io_destroy_buffers(ctx); 2905 mutex_unlock(&ctx->uring_lock); 2906 if (ctx->sq_creds) 2907 put_cred(ctx->sq_creds); 2908 if (ctx->submitter_task) 2909 put_task_struct(ctx->submitter_task); 2910 2911 /* there are no registered resources left, nobody uses it */ 2912 if (ctx->rsrc_node) 2913 io_rsrc_node_destroy(ctx, ctx->rsrc_node); 2914 2915 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); 2916 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); 2917 2918 io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free); 2919 if (ctx->mm_account) { 2920 mmdrop(ctx->mm_account); 2921 ctx->mm_account = NULL; 2922 } 2923 io_rings_free(ctx); 2924 io_kbuf_mmap_list_free(ctx); 2925 2926 percpu_ref_exit(&ctx->refs); 2927 free_uid(ctx->user); 2928 io_req_caches_free(ctx); 2929 if (ctx->hash_map) 2930 io_wq_put_hash(ctx->hash_map); 2931 kfree(ctx->cancel_table.hbs); 2932 kfree(ctx->cancel_table_locked.hbs); 2933 kfree(ctx->io_bl); 2934 xa_destroy(&ctx->io_bl_xa); 2935 kfree(ctx); 2936 } 2937 2938 static __cold void io_activate_pollwq_cb(struct callback_head *cb) 2939 { 2940 struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx, 2941 poll_wq_task_work); 2942 2943 mutex_lock(&ctx->uring_lock); 2944 ctx->poll_activated = true; 2945 mutex_unlock(&ctx->uring_lock); 2946 2947 /* 2948 * Wake ups for some events between start of polling and activation 2949 * might've been lost due to loose synchronisation. 
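 * Wake everything up unconditionally here to make up for any such
 * lost wakeups.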
2950 */ 2951 wake_up_all(&ctx->poll_wq); 2952 percpu_ref_put(&ctx->refs); 2953 } 2954 2955 static __cold void io_activate_pollwq(struct io_ring_ctx *ctx) 2956 { 2957 spin_lock(&ctx->completion_lock); 2958 /* already activated or in progress */ 2959 if (ctx->poll_activated || ctx->poll_wq_task_work.func) 2960 goto out; 2961 if (WARN_ON_ONCE(!ctx->task_complete)) 2962 goto out; 2963 if (!ctx->submitter_task) 2964 goto out; 2965 /* 2966 * with ->submitter_task only the submitter task completes requests, we 2967 * only need to sync with it, which is done by injecting a tw 2968 */ 2969 init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb); 2970 percpu_ref_get(&ctx->refs); 2971 if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL)) 2972 percpu_ref_put(&ctx->refs); 2973 out: 2974 spin_unlock(&ctx->completion_lock); 2975 } 2976 2977 static __poll_t io_uring_poll(struct file *file, poll_table *wait) 2978 { 2979 struct io_ring_ctx *ctx = file->private_data; 2980 __poll_t mask = 0; 2981 2982 if (unlikely(!ctx->poll_activated)) 2983 io_activate_pollwq(ctx); 2984 2985 poll_wait(file, &ctx->poll_wq, wait); 2986 /* 2987 * synchronizes with barrier from wq_has_sleeper call in 2988 * io_commit_cqring 2989 */ 2990 smp_rmb(); 2991 if (!io_sqring_full(ctx)) 2992 mask |= EPOLLOUT | EPOLLWRNORM; 2993 2994 /* 2995 * Don't flush cqring overflow list here, just do a simple check. 2996 * Otherwise there could possible be ABBA deadlock: 2997 * CPU0 CPU1 2998 * ---- ---- 2999 * lock(&ctx->uring_lock); 3000 * lock(&ep->mtx); 3001 * lock(&ctx->uring_lock); 3002 * lock(&ep->mtx); 3003 * 3004 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this 3005 * pushes them to do the flush. 3006 */ 3007 3008 if (__io_cqring_events_user(ctx) || io_has_work(ctx)) 3009 mask |= EPOLLIN | EPOLLRDNORM; 3010 3011 return mask; 3012 } 3013 3014 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) 3015 { 3016 const struct cred *creds; 3017 3018 creds = xa_erase(&ctx->personalities, id); 3019 if (creds) { 3020 put_cred(creds); 3021 return 0; 3022 } 3023 3024 return -EINVAL; 3025 } 3026 3027 struct io_tctx_exit { 3028 struct callback_head task_work; 3029 struct completion completion; 3030 struct io_ring_ctx *ctx; 3031 }; 3032 3033 static __cold void io_tctx_exit_cb(struct callback_head *cb) 3034 { 3035 struct io_uring_task *tctx = current->io_uring; 3036 struct io_tctx_exit *work; 3037 3038 work = container_of(cb, struct io_tctx_exit, task_work); 3039 /* 3040 * When @in_cancel, we're in cancellation and it's racy to remove the 3041 * node. It'll be removed by the end of cancellation, just ignore it. 3042 * tctx can be NULL if the queueing of this task_work raced with 3043 * work cancelation off the exec path. 
3044 */ 3045 if (tctx && !atomic_read(&tctx->in_cancel)) 3046 io_uring_del_tctx_node((unsigned long)work->ctx); 3047 complete(&work->completion); 3048 } 3049 3050 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) 3051 { 3052 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 3053 3054 return req->ctx == data; 3055 } 3056 3057 static __cold void io_ring_exit_work(struct work_struct *work) 3058 { 3059 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); 3060 unsigned long timeout = jiffies + HZ * 60 * 5; 3061 unsigned long interval = HZ / 20; 3062 struct io_tctx_exit exit; 3063 struct io_tctx_node *node; 3064 int ret; 3065 3066 /* 3067 * If we're doing polled IO and end up having requests being 3068 * submitted async (out-of-line), then completions can come in while 3069 * we're waiting for refs to drop. We need to reap these manually, 3070 * as nobody else will be looking for them. 3071 */ 3072 do { 3073 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { 3074 mutex_lock(&ctx->uring_lock); 3075 io_cqring_overflow_kill(ctx); 3076 mutex_unlock(&ctx->uring_lock); 3077 } 3078 3079 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3080 io_move_task_work_from_local(ctx); 3081 3082 while (io_uring_try_cancel_requests(ctx, NULL, true)) 3083 cond_resched(); 3084 3085 if (ctx->sq_data) { 3086 struct io_sq_data *sqd = ctx->sq_data; 3087 struct task_struct *tsk; 3088 3089 io_sq_thread_park(sqd); 3090 tsk = sqd->thread; 3091 if (tsk && tsk->io_uring && tsk->io_uring->io_wq) 3092 io_wq_cancel_cb(tsk->io_uring->io_wq, 3093 io_cancel_ctx_cb, ctx, true); 3094 io_sq_thread_unpark(sqd); 3095 } 3096 3097 io_req_caches_free(ctx); 3098 3099 if (WARN_ON_ONCE(time_after(jiffies, timeout))) { 3100 /* there is little hope left, don't run it too often */ 3101 interval = HZ * 60; 3102 } 3103 /* 3104 * This is really an uninterruptible wait, as it has to be 3105 * complete. But it's also run from a kworker, which doesn't 3106 * take signals, so it's fine to make it interruptible. This 3107 * avoids scenarios where we knowingly can wait much longer 3108 * on completions, for example if someone does a SIGSTOP on 3109 * a task that needs to finish task_work to make this loop 3110 * complete. That's a synthetic situation that should not 3111 * cause a stuck task backtrace, and hence a potential panic 3112 * on stuck tasks if that is enabled. 3113 */ 3114 } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval)); 3115 3116 init_completion(&exit.completion); 3117 init_task_work(&exit.task_work, io_tctx_exit_cb); 3118 exit.ctx = ctx; 3119 3120 mutex_lock(&ctx->uring_lock); 3121 while (!list_empty(&ctx->tctx_list)) { 3122 WARN_ON_ONCE(time_after(jiffies, timeout)); 3123 3124 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, 3125 ctx_node); 3126 /* don't spin on a single task if cancellation failed */ 3127 list_rotate_left(&ctx->tctx_list); 3128 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); 3129 if (WARN_ON_ONCE(ret)) 3130 continue; 3131 3132 mutex_unlock(&ctx->uring_lock); 3133 /* 3134 * See comment above for 3135 * wait_for_completion_interruptible_timeout() on why this 3136 * wait is marked as interruptible. 
3137 */ 3138 wait_for_completion_interruptible(&exit.completion); 3139 mutex_lock(&ctx->uring_lock); 3140 } 3141 mutex_unlock(&ctx->uring_lock); 3142 spin_lock(&ctx->completion_lock); 3143 spin_unlock(&ctx->completion_lock); 3144 3145 /* pairs with RCU read section in io_req_local_work_add() */ 3146 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3147 synchronize_rcu(); 3148 3149 io_ring_ctx_free(ctx); 3150 } 3151 3152 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) 3153 { 3154 unsigned long index; 3155 struct creds *creds; 3156 3157 mutex_lock(&ctx->uring_lock); 3158 percpu_ref_kill(&ctx->refs); 3159 xa_for_each(&ctx->personalities, index, creds) 3160 io_unregister_personality(ctx, index); 3161 if (ctx->rings) 3162 io_poll_remove_all(ctx, NULL, true); 3163 mutex_unlock(&ctx->uring_lock); 3164 3165 /* 3166 * If we failed setting up the ctx, we might not have any rings 3167 * and therefore did not submit any requests 3168 */ 3169 if (ctx->rings) 3170 io_kill_timeouts(ctx, NULL, true); 3171 3172 flush_delayed_work(&ctx->fallback_work); 3173 3174 INIT_WORK(&ctx->exit_work, io_ring_exit_work); 3175 /* 3176 * Use system_unbound_wq to avoid spawning tons of event kworkers 3177 * if we're exiting a ton of rings at the same time. It just adds 3178 * noise and overhead, there's no discernable change in runtime 3179 * over using system_wq. 3180 */ 3181 queue_work(system_unbound_wq, &ctx->exit_work); 3182 } 3183 3184 static int io_uring_release(struct inode *inode, struct file *file) 3185 { 3186 struct io_ring_ctx *ctx = file->private_data; 3187 3188 file->private_data = NULL; 3189 io_ring_ctx_wait_and_kill(ctx); 3190 return 0; 3191 } 3192 3193 struct io_task_cancel { 3194 struct task_struct *task; 3195 bool all; 3196 }; 3197 3198 static bool io_cancel_task_cb(struct io_wq_work *work, void *data) 3199 { 3200 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 3201 struct io_task_cancel *cancel = data; 3202 3203 return io_match_task_safe(req, cancel->task, cancel->all); 3204 } 3205 3206 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, 3207 struct task_struct *task, 3208 bool cancel_all) 3209 { 3210 struct io_defer_entry *de; 3211 LIST_HEAD(list); 3212 3213 spin_lock(&ctx->completion_lock); 3214 list_for_each_entry_reverse(de, &ctx->defer_list, list) { 3215 if (io_match_task_safe(de->req, task, cancel_all)) { 3216 list_cut_position(&list, &ctx->defer_list, &de->list); 3217 break; 3218 } 3219 } 3220 spin_unlock(&ctx->completion_lock); 3221 if (list_empty(&list)) 3222 return false; 3223 3224 while (!list_empty(&list)) { 3225 de = list_first_entry(&list, struct io_defer_entry, list); 3226 list_del_init(&de->list); 3227 io_req_task_queue_fail(de->req, -ECANCELED); 3228 kfree(de); 3229 } 3230 return true; 3231 } 3232 3233 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) 3234 { 3235 struct io_tctx_node *node; 3236 enum io_wq_cancel cret; 3237 bool ret = false; 3238 3239 mutex_lock(&ctx->uring_lock); 3240 list_for_each_entry(node, &ctx->tctx_list, ctx_node) { 3241 struct io_uring_task *tctx = node->task->io_uring; 3242 3243 /* 3244 * io_wq will stay alive while we hold uring_lock, because it's 3245 * killed after ctx nodes, which requires to take the lock. 
3246 */ 3247 if (!tctx || !tctx->io_wq) 3248 continue; 3249 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); 3250 ret |= (cret != IO_WQ_CANCEL_NOTFOUND); 3251 } 3252 mutex_unlock(&ctx->uring_lock); 3253 3254 return ret; 3255 } 3256 3257 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, 3258 struct task_struct *task, 3259 bool cancel_all) 3260 { 3261 struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; 3262 struct io_uring_task *tctx = task ? task->io_uring : NULL; 3263 enum io_wq_cancel cret; 3264 bool ret = false; 3265 3266 /* set it so io_req_local_work_add() would wake us up */ 3267 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 3268 atomic_set(&ctx->cq_wait_nr, 1); 3269 smp_mb(); 3270 } 3271 3272 /* failed during ring init, it couldn't have issued any requests */ 3273 if (!ctx->rings) 3274 return false; 3275 3276 if (!task) { 3277 ret |= io_uring_try_cancel_iowq(ctx); 3278 } else if (tctx && tctx->io_wq) { 3279 /* 3280 * Cancels requests of all rings, not only @ctx, but 3281 * it's fine as the task is in exit/exec. 3282 */ 3283 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, 3284 &cancel, true); 3285 ret |= (cret != IO_WQ_CANCEL_NOTFOUND); 3286 } 3287 3288 /* SQPOLL thread does its own polling */ 3289 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || 3290 (ctx->sq_data && ctx->sq_data->thread == current)) { 3291 while (!wq_list_empty(&ctx->iopoll_list)) { 3292 io_iopoll_try_reap_events(ctx); 3293 ret = true; 3294 cond_resched(); 3295 } 3296 } 3297 3298 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && 3299 io_allowed_defer_tw_run(ctx)) 3300 ret |= io_run_local_work(ctx, INT_MAX) > 0; 3301 ret |= io_cancel_defer_files(ctx, task, cancel_all); 3302 mutex_lock(&ctx->uring_lock); 3303 ret |= io_poll_remove_all(ctx, task, cancel_all); 3304 mutex_unlock(&ctx->uring_lock); 3305 ret |= io_kill_timeouts(ctx, task, cancel_all); 3306 if (task) 3307 ret |= io_run_task_work() > 0; 3308 return ret; 3309 } 3310 3311 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) 3312 { 3313 if (tracked) 3314 return atomic_read(&tctx->inflight_tracked); 3315 return percpu_counter_sum(&tctx->inflight); 3316 } 3317 3318 /* 3319 * Find any io_uring ctx that this task has registered or done IO on, and cancel 3320 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation. 
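 *
 * The loop below keeps dropping cached tctx refs, sampling the inflight
 * count and running cancelation on each relevant ctx; it only schedules
 * out once a full pass makes no progress and the inflight count stays
 * unchanged.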
3321 */ 3322 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) 3323 { 3324 struct io_uring_task *tctx = current->io_uring; 3325 struct io_ring_ctx *ctx; 3326 struct io_tctx_node *node; 3327 unsigned long index; 3328 s64 inflight; 3329 DEFINE_WAIT(wait); 3330 3331 WARN_ON_ONCE(sqd && sqd->thread != current); 3332 3333 if (!current->io_uring) 3334 return; 3335 if (tctx->io_wq) 3336 io_wq_exit_start(tctx->io_wq); 3337 3338 atomic_inc(&tctx->in_cancel); 3339 do { 3340 bool loop = false; 3341 3342 io_uring_drop_tctx_refs(current); 3343 /* read completions before cancelations */ 3344 inflight = tctx_inflight(tctx, !cancel_all); 3345 if (!inflight) 3346 break; 3347 3348 if (!sqd) { 3349 xa_for_each(&tctx->xa, index, node) { 3350 /* sqpoll task will cancel all its requests */ 3351 if (node->ctx->sq_data) 3352 continue; 3353 loop |= io_uring_try_cancel_requests(node->ctx, 3354 current, cancel_all); 3355 } 3356 } else { 3357 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 3358 loop |= io_uring_try_cancel_requests(ctx, 3359 current, 3360 cancel_all); 3361 } 3362 3363 if (loop) { 3364 cond_resched(); 3365 continue; 3366 } 3367 3368 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); 3369 io_run_task_work(); 3370 io_uring_drop_tctx_refs(current); 3371 xa_for_each(&tctx->xa, index, node) { 3372 if (!llist_empty(&node->ctx->work_llist)) { 3373 WARN_ON_ONCE(node->ctx->submitter_task && 3374 node->ctx->submitter_task != current); 3375 goto end_wait; 3376 } 3377 } 3378 /* 3379 * If we've seen completions, retry without waiting. This 3380 * avoids a race where a completion comes in before we did 3381 * prepare_to_wait(). 3382 */ 3383 if (inflight == tctx_inflight(tctx, !cancel_all)) 3384 schedule(); 3385 end_wait: 3386 finish_wait(&tctx->wait, &wait); 3387 } while (1); 3388 3389 io_uring_clean_tctx(tctx); 3390 if (cancel_all) { 3391 /* 3392 * We shouldn't run task_works after cancel, so just leave 3393 * ->in_cancel set for normal exit. 
3394 */ 3395 atomic_dec(&tctx->in_cancel); 3396 /* for exec all current's requests should be gone, kill tctx */ 3397 __io_uring_free(current); 3398 } 3399 } 3400 3401 void __io_uring_cancel(bool cancel_all) 3402 { 3403 io_uring_cancel_generic(cancel_all, NULL); 3404 } 3405 3406 static void *io_uring_validate_mmap_request(struct file *file, 3407 loff_t pgoff, size_t sz) 3408 { 3409 struct io_ring_ctx *ctx = file->private_data; 3410 loff_t offset = pgoff << PAGE_SHIFT; 3411 struct page *page; 3412 void *ptr; 3413 3414 switch (offset & IORING_OFF_MMAP_MASK) { 3415 case IORING_OFF_SQ_RING: 3416 case IORING_OFF_CQ_RING: 3417 /* Don't allow mmap if the ring was setup without it */ 3418 if (ctx->flags & IORING_SETUP_NO_MMAP) 3419 return ERR_PTR(-EINVAL); 3420 ptr = ctx->rings; 3421 break; 3422 case IORING_OFF_SQES: 3423 /* Don't allow mmap if the ring was setup without it */ 3424 if (ctx->flags & IORING_SETUP_NO_MMAP) 3425 return ERR_PTR(-EINVAL); 3426 ptr = ctx->sq_sqes; 3427 break; 3428 case IORING_OFF_PBUF_RING: { 3429 unsigned int bgid; 3430 3431 bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; 3432 rcu_read_lock(); 3433 ptr = io_pbuf_get_address(ctx, bgid); 3434 rcu_read_unlock(); 3435 if (!ptr) 3436 return ERR_PTR(-EINVAL); 3437 break; 3438 } 3439 default: 3440 return ERR_PTR(-EINVAL); 3441 } 3442 3443 page = virt_to_head_page(ptr); 3444 if (sz > page_size(page)) 3445 return ERR_PTR(-EINVAL); 3446 3447 return ptr; 3448 } 3449 3450 #ifdef CONFIG_MMU 3451 3452 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) 3453 { 3454 size_t sz = vma->vm_end - vma->vm_start; 3455 unsigned long pfn; 3456 void *ptr; 3457 3458 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); 3459 if (IS_ERR(ptr)) 3460 return PTR_ERR(ptr); 3461 3462 pfn = virt_to_phys(ptr) >> PAGE_SHIFT; 3463 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); 3464 } 3465 3466 static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, 3467 unsigned long addr, unsigned long len, 3468 unsigned long pgoff, unsigned long flags) 3469 { 3470 void *ptr; 3471 3472 /* 3473 * Do not allow to map to user-provided address to avoid breaking the 3474 * aliasing rules. Userspace is not able to guess the offset address of 3475 * kernel kmalloc()ed memory area. 3476 */ 3477 if (addr) 3478 return -EINVAL; 3479 3480 ptr = io_uring_validate_mmap_request(filp, pgoff, len); 3481 if (IS_ERR(ptr)) 3482 return -ENOMEM; 3483 3484 /* 3485 * Some architectures have strong cache aliasing requirements. 3486 * For such architectures we need a coherent mapping which aliases 3487 * kernel memory *and* userspace memory. To achieve that: 3488 * - use a NULL file pointer to reference physical memory, and 3489 * - use the kernel virtual address of the shared io_uring context 3490 * (instead of the userspace-provided address, which has to be 0UL 3491 * anyway). 3492 * - use the same pgoff which the get_unmapped_area() uses to 3493 * calculate the page colouring. 3494 * For architectures without such aliasing requirements, the 3495 * architecture will return any suitable mapping because addr is 0. 
3496 */ 3497 filp = NULL; 3498 flags |= MAP_SHARED; 3499 pgoff = 0; /* has been translated to ptr above */ 3500 #ifdef SHM_COLOUR 3501 addr = (uintptr_t) ptr; 3502 pgoff = addr >> PAGE_SHIFT; 3503 #else 3504 addr = 0UL; 3505 #endif 3506 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 3507 } 3508 3509 #else /* !CONFIG_MMU */ 3510 3511 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) 3512 { 3513 return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL; 3514 } 3515 3516 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file) 3517 { 3518 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE; 3519 } 3520 3521 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, 3522 unsigned long addr, unsigned long len, 3523 unsigned long pgoff, unsigned long flags) 3524 { 3525 void *ptr; 3526 3527 ptr = io_uring_validate_mmap_request(file, pgoff, len); 3528 if (IS_ERR(ptr)) 3529 return PTR_ERR(ptr); 3530 3531 return (unsigned long) ptr; 3532 } 3533 3534 #endif /* !CONFIG_MMU */ 3535 3536 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz) 3537 { 3538 if (flags & IORING_ENTER_EXT_ARG) { 3539 struct io_uring_getevents_arg arg; 3540 3541 if (argsz != sizeof(arg)) 3542 return -EINVAL; 3543 if (copy_from_user(&arg, argp, sizeof(arg))) 3544 return -EFAULT; 3545 } 3546 return 0; 3547 } 3548 3549 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, 3550 struct __kernel_timespec __user **ts, 3551 const sigset_t __user **sig) 3552 { 3553 struct io_uring_getevents_arg arg; 3554 3555 /* 3556 * If EXT_ARG isn't set, then we have no timespec and the argp pointer 3557 * is just a pointer to the sigset_t. 3558 */ 3559 if (!(flags & IORING_ENTER_EXT_ARG)) { 3560 *sig = (const sigset_t __user *) argp; 3561 *ts = NULL; 3562 return 0; 3563 } 3564 3565 /* 3566 * EXT_ARG is set - ensure we agree on the size of it and copy in our 3567 * timespec and sigset_t pointers if good. 3568 */ 3569 if (*argsz != sizeof(arg)) 3570 return -EINVAL; 3571 if (copy_from_user(&arg, argp, sizeof(arg))) 3572 return -EFAULT; 3573 if (arg.pad) 3574 return -EINVAL; 3575 *sig = u64_to_user_ptr(arg.sigmask); 3576 *argsz = arg.sigmask_sz; 3577 *ts = u64_to_user_ptr(arg.ts); 3578 return 0; 3579 } 3580 3581 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, 3582 u32, min_complete, u32, flags, const void __user *, argp, 3583 size_t, argsz) 3584 { 3585 struct io_ring_ctx *ctx; 3586 struct file *file; 3587 long ret; 3588 3589 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | 3590 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | 3591 IORING_ENTER_REGISTERED_RING))) 3592 return -EINVAL; 3593 3594 /* 3595 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we 3596 * need only dereference our task private array to find it. 
3597 */ 3598 if (flags & IORING_ENTER_REGISTERED_RING) { 3599 struct io_uring_task *tctx = current->io_uring; 3600 3601 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) 3602 return -EINVAL; 3603 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); 3604 file = tctx->registered_rings[fd]; 3605 if (unlikely(!file)) 3606 return -EBADF; 3607 } else { 3608 file = fget(fd); 3609 if (unlikely(!file)) 3610 return -EBADF; 3611 ret = -EOPNOTSUPP; 3612 if (unlikely(!io_is_uring_fops(file))) 3613 goto out; 3614 } 3615 3616 ctx = file->private_data; 3617 ret = -EBADFD; 3618 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) 3619 goto out; 3620 3621 /* 3622 * For SQ polling, the thread will do all submissions and completions. 3623 * Just return the requested submit count, and wake the thread if 3624 * we were asked to. 3625 */ 3626 ret = 0; 3627 if (ctx->flags & IORING_SETUP_SQPOLL) { 3628 io_cqring_overflow_flush(ctx); 3629 3630 if (unlikely(ctx->sq_data->thread == NULL)) { 3631 ret = -EOWNERDEAD; 3632 goto out; 3633 } 3634 if (flags & IORING_ENTER_SQ_WAKEUP) 3635 wake_up(&ctx->sq_data->wait); 3636 if (flags & IORING_ENTER_SQ_WAIT) 3637 io_sqpoll_wait_sq(ctx); 3638 3639 ret = to_submit; 3640 } else if (to_submit) { 3641 ret = io_uring_add_tctx_node(ctx); 3642 if (unlikely(ret)) 3643 goto out; 3644 3645 mutex_lock(&ctx->uring_lock); 3646 ret = io_submit_sqes(ctx, to_submit); 3647 if (ret != to_submit) { 3648 mutex_unlock(&ctx->uring_lock); 3649 goto out; 3650 } 3651 if (flags & IORING_ENTER_GETEVENTS) { 3652 if (ctx->syscall_iopoll) 3653 goto iopoll_locked; 3654 /* 3655 * Ignore errors, we'll soon call io_cqring_wait() and 3656 * it should handle ownership problems if any. 3657 */ 3658 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3659 (void)io_run_local_work_locked(ctx, min_complete); 3660 } 3661 mutex_unlock(&ctx->uring_lock); 3662 } 3663 3664 if (flags & IORING_ENTER_GETEVENTS) { 3665 int ret2; 3666 3667 if (ctx->syscall_iopoll) { 3668 /* 3669 * We disallow the app entering submit/complete with 3670 * polling, but we still need to lock the ring to 3671 * prevent racing with polled issue that got punted to 3672 * a workqueue. 3673 */ 3674 mutex_lock(&ctx->uring_lock); 3675 iopoll_locked: 3676 ret2 = io_validate_ext_arg(flags, argp, argsz); 3677 if (likely(!ret2)) { 3678 min_complete = min(min_complete, 3679 ctx->cq_entries); 3680 ret2 = io_iopoll_check(ctx, min_complete); 3681 } 3682 mutex_unlock(&ctx->uring_lock); 3683 } else { 3684 const sigset_t __user *sig; 3685 struct __kernel_timespec __user *ts; 3686 3687 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); 3688 if (likely(!ret2)) { 3689 min_complete = min(min_complete, 3690 ctx->cq_entries); 3691 ret2 = io_cqring_wait(ctx, min_complete, sig, 3692 argsz, ts); 3693 } 3694 } 3695 3696 if (!ret) { 3697 ret = ret2; 3698 3699 /* 3700 * EBADR indicates that one or more CQE were dropped. 3701 * Once the user has been informed we can clear the bit 3702 * as they are obviously ok with those drops. 
3703 */ 3704 if (unlikely(ret2 == -EBADR)) 3705 clear_bit(IO_CHECK_CQ_DROPPED_BIT, 3706 &ctx->check_cq); 3707 } 3708 } 3709 out: 3710 if (!(flags & IORING_ENTER_REGISTERED_RING)) 3711 fput(file); 3712 return ret; 3713 } 3714 3715 static const struct file_operations io_uring_fops = { 3716 .release = io_uring_release, 3717 .mmap = io_uring_mmap, 3718 #ifndef CONFIG_MMU 3719 .get_unmapped_area = io_uring_nommu_get_unmapped_area, 3720 .mmap_capabilities = io_uring_nommu_mmap_capabilities, 3721 #else 3722 .get_unmapped_area = io_uring_mmu_get_unmapped_area, 3723 #endif 3724 .poll = io_uring_poll, 3725 #ifdef CONFIG_PROC_FS 3726 .show_fdinfo = io_uring_show_fdinfo, 3727 #endif 3728 }; 3729 3730 bool io_is_uring_fops(struct file *file) 3731 { 3732 return file->f_op == &io_uring_fops; 3733 } 3734 3735 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, 3736 struct io_uring_params *p) 3737 { 3738 struct io_rings *rings; 3739 size_t size, sq_array_offset; 3740 void *ptr; 3741 3742 /* make sure these are sane, as we already accounted them */ 3743 ctx->sq_entries = p->sq_entries; 3744 ctx->cq_entries = p->cq_entries; 3745 3746 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset); 3747 if (size == SIZE_MAX) 3748 return -EOVERFLOW; 3749 3750 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3751 rings = io_mem_alloc(size); 3752 else 3753 rings = io_rings_map(ctx, p->cq_off.user_addr, size); 3754 3755 if (IS_ERR(rings)) 3756 return PTR_ERR(rings); 3757 3758 ctx->rings = rings; 3759 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 3760 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); 3761 rings->sq_ring_mask = p->sq_entries - 1; 3762 rings->cq_ring_mask = p->cq_entries - 1; 3763 rings->sq_ring_entries = p->sq_entries; 3764 rings->cq_ring_entries = p->cq_entries; 3765 3766 if (p->flags & IORING_SETUP_SQE128) 3767 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); 3768 else 3769 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); 3770 if (size == SIZE_MAX) { 3771 io_rings_free(ctx); 3772 return -EOVERFLOW; 3773 } 3774 3775 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3776 ptr = io_mem_alloc(size); 3777 else 3778 ptr = io_sqes_map(ctx, p->sq_off.user_addr, size); 3779 3780 if (IS_ERR(ptr)) { 3781 io_rings_free(ctx); 3782 return PTR_ERR(ptr); 3783 } 3784 3785 ctx->sq_sqes = ptr; 3786 return 0; 3787 } 3788 3789 static int io_uring_install_fd(struct file *file) 3790 { 3791 int fd; 3792 3793 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); 3794 if (fd < 0) 3795 return fd; 3796 fd_install(fd, file); 3797 return fd; 3798 } 3799 3800 /* 3801 * Allocate an anonymous fd, this is what constitutes the application 3802 * visible backing of an io_uring instance. The application mmaps this 3803 * fd to gain access to the SQ/CQ ring details. 
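 *
 * E.g. userspace maps the SQ/CQ rings through mmap() on this fd at the
 * IORING_OFF_SQ_RING/IORING_OFF_CQ_RING offsets and the SQE array at
 * IORING_OFF_SQES, which is what liburing's io_uring_queue_mmap() does.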
3804 */ 3805 static struct file *io_uring_get_file(struct io_ring_ctx *ctx) 3806 { 3807 return anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx, 3808 O_RDWR | O_CLOEXEC, NULL); 3809 } 3810 3811 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, 3812 struct io_uring_params __user *params) 3813 { 3814 struct io_ring_ctx *ctx; 3815 struct io_uring_task *tctx; 3816 struct file *file; 3817 int ret; 3818 3819 if (!entries) 3820 return -EINVAL; 3821 if (entries > IORING_MAX_ENTRIES) { 3822 if (!(p->flags & IORING_SETUP_CLAMP)) 3823 return -EINVAL; 3824 entries = IORING_MAX_ENTRIES; 3825 } 3826 3827 if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY) 3828 && !(p->flags & IORING_SETUP_NO_MMAP)) 3829 return -EINVAL; 3830 3831 /* 3832 * Use twice as many entries for the CQ ring. It's possible for the 3833 * application to drive a higher depth than the size of the SQ ring, 3834 * since the sqes are only used at submission time. This allows for 3835 * some flexibility in overcommitting a bit. If the application has 3836 * set IORING_SETUP_CQSIZE, it will have passed in the desired number 3837 * of CQ ring entries manually. 3838 */ 3839 p->sq_entries = roundup_pow_of_two(entries); 3840 if (p->flags & IORING_SETUP_CQSIZE) { 3841 /* 3842 * If IORING_SETUP_CQSIZE is set, we do the same roundup 3843 * to a power-of-two, if it isn't already. We do NOT impose 3844 * any cq vs sq ring sizing. 3845 */ 3846 if (!p->cq_entries) 3847 return -EINVAL; 3848 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { 3849 if (!(p->flags & IORING_SETUP_CLAMP)) 3850 return -EINVAL; 3851 p->cq_entries = IORING_MAX_CQ_ENTRIES; 3852 } 3853 p->cq_entries = roundup_pow_of_two(p->cq_entries); 3854 if (p->cq_entries < p->sq_entries) 3855 return -EINVAL; 3856 } else { 3857 p->cq_entries = 2 * p->sq_entries; 3858 } 3859 3860 ctx = io_ring_ctx_alloc(p); 3861 if (!ctx) 3862 return -ENOMEM; 3863 3864 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && 3865 !(ctx->flags & IORING_SETUP_IOPOLL) && 3866 !(ctx->flags & IORING_SETUP_SQPOLL)) 3867 ctx->task_complete = true; 3868 3869 if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) 3870 ctx->lockless_cq = true; 3871 3872 /* 3873 * lazy poll_wq activation relies on ->task_complete for synchronisation 3874 * purposes, see io_activate_pollwq() 3875 */ 3876 if (!ctx->task_complete) 3877 ctx->poll_activated = true; 3878 3879 /* 3880 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user 3881 * space applications don't need to do io completion events 3882 * polling again, they can rely on io_sq_thread to do polling 3883 * work, which can reduce cpu usage and uring_lock contention. 3884 */ 3885 if (ctx->flags & IORING_SETUP_IOPOLL && 3886 !(ctx->flags & IORING_SETUP_SQPOLL)) 3887 ctx->syscall_iopoll = 1; 3888 3889 ctx->compat = in_compat_syscall(); 3890 if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK)) 3891 ctx->user = get_uid(current_user()); 3892 3893 /* 3894 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if 3895 * COOP_TASKRUN is set, then IPIs are never needed by the app. 
3896 */ 3897 ret = -EINVAL; 3898 if (ctx->flags & IORING_SETUP_SQPOLL) { 3899 /* IPI related flags don't make sense with SQPOLL */ 3900 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | 3901 IORING_SETUP_TASKRUN_FLAG | 3902 IORING_SETUP_DEFER_TASKRUN)) 3903 goto err; 3904 ctx->notify_method = TWA_SIGNAL_NO_IPI; 3905 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { 3906 ctx->notify_method = TWA_SIGNAL_NO_IPI; 3907 } else { 3908 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG && 3909 !(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) 3910 goto err; 3911 ctx->notify_method = TWA_SIGNAL; 3912 } 3913 3914 /* 3915 * For DEFER_TASKRUN we require the completion task to be the same as the 3916 * submission task. This implies that there is only one submitter, so enforce 3917 * that. 3918 */ 3919 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN && 3920 !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) { 3921 goto err; 3922 } 3923 3924 /* 3925 * This is just grabbed for accounting purposes. When a process exits, 3926 * the mm is exited and dropped before the files, hence we need to hang 3927 * on to this mm purely for the purposes of being able to unaccount 3928 * memory (locked/pinned vm). It's not used for anything else. 3929 */ 3930 mmgrab(current->mm); 3931 ctx->mm_account = current->mm; 3932 3933 ret = io_allocate_scq_urings(ctx, p); 3934 if (ret) 3935 goto err; 3936 3937 ret = io_sq_offload_create(ctx, p); 3938 if (ret) 3939 goto err; 3940 3941 ret = io_rsrc_init(ctx); 3942 if (ret) 3943 goto err; 3944 3945 p->sq_off.head = offsetof(struct io_rings, sq.head); 3946 p->sq_off.tail = offsetof(struct io_rings, sq.tail); 3947 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); 3948 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); 3949 p->sq_off.flags = offsetof(struct io_rings, sq_flags); 3950 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); 3951 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 3952 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; 3953 p->sq_off.resv1 = 0; 3954 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3955 p->sq_off.user_addr = 0; 3956 3957 p->cq_off.head = offsetof(struct io_rings, cq.head); 3958 p->cq_off.tail = offsetof(struct io_rings, cq.tail); 3959 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); 3960 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); 3961 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); 3962 p->cq_off.cqes = offsetof(struct io_rings, cqes); 3963 p->cq_off.flags = offsetof(struct io_rings, cq_flags); 3964 p->cq_off.resv1 = 0; 3965 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3966 p->cq_off.user_addr = 0; 3967 3968 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | 3969 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | 3970 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | 3971 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | 3972 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | 3973 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP | 3974 IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING; 3975 3976 if (copy_to_user(params, p, sizeof(*p))) { 3977 ret = -EFAULT; 3978 goto err; 3979 } 3980 3981 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER 3982 && !(ctx->flags & IORING_SETUP_R_DISABLED)) 3983 WRITE_ONCE(ctx->submitter_task, get_task_struct(current)); 3984 3985 file = io_uring_get_file(ctx); 3986 if (IS_ERR(file)) { 3987 ret = PTR_ERR(file); 3988 goto err; 3989 } 3990 3991 ret = __io_uring_add_tctx_node(ctx); 3992 if (ret) 3993 goto err_fput; 3994 tctx = current->io_uring; 3995 
/*
 * Sets up an io_uring context, and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
			IORING_SETUP_NO_SQARRAY))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

static inline bool io_uring_allowed(void)
{
	int disabled = READ_ONCE(sysctl_io_uring_disabled);
	kgid_t io_uring_group;

	if (disabled == 2)
		return false;

	if (disabled == 0 || capable(CAP_SYS_ADMIN))
		return true;

	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
	if (!gid_valid(io_uring_group))
		return false;

	return in_group_p(io_uring_group);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	if (!io_uring_allowed())
		return -EPERM;

	return io_uring_setup(entries, params);
}

static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
			   unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_issue_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
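/*
 * Illustrative userspace sketch (not kernel code): querying io_probe()
 * above through IORING_REGISTER_PROBE. A minimal sketch assuming a valid
 * ring fd in 'ring_fd'; error handling is omitted.
 *
 *	struct io_uring_probe *probe;
 *
 *	// A zeroed buffer is required: io_probe() rejects non-zero input.
 *	probe = calloc(1, sizeof(*probe) +
 *			  256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, 256);
 *	if (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED)
 *		; // IORING_OP_READV is supported by this kernel
 *	free(probe);
 */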
static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
					   void __user *arg, unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
		/*
		 * Lazy activation attempts would fail if it was polled before
		 * submitter_task is set.
		 */
		if (wq_has_sleeper(&ctx->poll_wq))
			io_activate_pollwq(ctx);
	}

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}
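/*
 * Illustrative userspace sketch (not kernel code): the intended pairing of
 * the two helpers above. The ring must be created with
 * IORING_SETUP_R_DISABLED, restricted while still disabled, then enabled.
 * A minimal sketch, no error handling.
 *
 *	struct io_uring_restriction res[2];
 *
 *	memset(res, 0, sizeof(res));
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;	// only READV SQEs allowed
 *	res[1].opcode = IORING_RESTRICTION_REGISTER_OP;
 *	res[1].register_op = IORING_REGISTER_ENABLE_RINGS;
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */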
static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
					 cpumask_var_t new_mask)
{
	int ret;

	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
	} else {
		mutex_unlock(&ctx->uring_lock);
		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
		mutex_lock(&ctx->uring_lock);
	}

	return ret;
}

static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	cpumask_var_t new_mask;
	int ret;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = __io_register_iowq_aff(ctx, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	return __io_register_iowq_aff(ctx, NULL);
}
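/*
 * Illustrative userspace sketch (not kernel code): pinning this ring's
 * io-wq (or SQPOLL) workers to CPU 0 via the registration interface above.
 * A minimal sketch, no error handling; cpu_set_t is assumed to be an
 * acceptable bitmap representation, as liburing uses.
 *
 *	#include <sched.h>
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 *	// ...later, to revert to the default affinity:
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_UNREGISTER_IOWQ_AFF, NULL, 0);
 */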
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
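/*
 * Illustrative userspace sketch (not kernel code): capping this ring's
 * io-wq worker pools via the helper above. Index 0 bounds the IO_WQ_BOUND
 * pool, index 1 the IO_WQ_UNBOUND pool; a zero leaves that limit
 * untouched, and the previous limits are copied back out. A minimal
 * sketch, no error handling.
 *
 *	__u32 counts[2] = { 8, 64 };	// bound, unbound
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 *	// counts[] now holds the limits that were in effect before
 */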
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
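/*
 * Illustrative userspace sketch (not kernel code): registering a provided
 * buffer ring, matching the IORING_REGISTER_PBUF_RING case above. A
 * minimal sketch assuming a page-sized ring of 8 entries; error handling
 * is omitted.
 *
 *	#include <sys/mman.h>
 *
 *	struct io_uring_buf_reg reg;
 *	struct io_uring_buf_ring *br;
 *
 *	br = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 *	memset(&reg, 0, sizeof(reg));
 *	reg.ring_addr = (unsigned long)br;	// must be page aligned
 *	reg.ring_entries = 8;			// must be a power of two
 *	reg.bgid = 0;				// buffer group ID, app's choice
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_PBUF_RING, &reg, 1);
 */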
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct file *file;
	bool use_registered_ring;

	use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
	opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	if (use_registered_ring) {
		/*
		 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
		 * need only dereference our task private array to find it.
		 */
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return -EINVAL;
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		file = tctx->registered_rings[fd];
		if (unlikely(!file))
			return -EBADF;
	} else {
		file = fget(fd);
		if (unlikely(!file))
			return -EBADF;
		ret = -EOPNOTSUPP;
		if (!io_is_uring_fops(file))
			goto out_fput;
	}

	ctx = file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	if (!use_registered_ring)
		fput(file);
	return ret;
}
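/*
 * Illustrative userspace sketch (not kernel code): how the registered-ring
 * path above is reached. The ring fd is first placed into the task-private
 * array via IORING_REGISTER_RING_FDS; the slot index written back into
 * 'upd.offset' then stands in for the real fd when or'ed with
 * IORING_REGISTER_USE_REGISTERED_RING. Minimal sketch, no error handling;
 * 'probe' is as in the io_probe() sketch earlier.
 *
 *	struct io_uring_rsrc_update upd;
 *
 *	memset(&upd, 0, sizeof(upd));
 *	upd.offset = -1U;		// let the kernel pick a free slot
 *	upd.data = ring_fd;
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RING_FDS, &upd, 1);
 *	// subsequent registrations may pass the slot index instead of the fd:
 *	syscall(__NR_io_uring_register, upd.offset,
 *		IORING_REGISTER_PROBE | IORING_REGISTER_USE_REGISTERED_RING,
 *		probe, 256);
 */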
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32,  __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	io_uring_optable_init();

	/*
	 * Allow user copy in the per-command field, which starts after the
	 * file in io_kiocb and until the opcode field. The openat2 handling
	 * requires copying in user memory into the io_kiocb object in that
	 * range, and HARDENED_USERCOPY will complain if we haven't
	 * correctly annotated this range.
	 */
	req_cachep = kmem_cache_create_usercopy("io_kiocb",
				sizeof(struct io_kiocb), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
				offsetof(struct io_kiocb, cmd.data),
				sizeof_field(struct io_kiocb, cmd.data), NULL);

#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
#endif

	return 0;
}
__initcall(io_uring_init);