// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
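/*
 * Illustrative userspace-side sketch of the pairing described above (an
 * assumption-laden example, not kernel code and not taken from liburing):
 * reaping CQEs with acquire/release instead of explicit smp_rmb()/smp_mb().
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);	// pairs with kernel tail store
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_mask];
 *		consume(cqe);				// entry reads ordered after acquire
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);		// pairs with kernel head load
 *
 * Here cq_head, cq_tail, cqes, cq_mask and consume() are placeholders for
 * the mmap()ed ring fields and the application's own handler.
 */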
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/highmem.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "cancel.h"
#include "net.h"
#include "notif.h"

#include "timeout.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			    REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH			32
#define IO_REQ_ALLOC_BATCH		8

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

enum {
	IO_EVENTFD_OP_SIGNAL_BIT,
	IO_EVENTFD_OP_FREE_BIT,
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);
static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
static __cold void io_fallback_tw(struct io_uring_task *tctx);

struct kmem_cache *req_cachep;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (io_is_uring_fops(file)) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cqes_count)
		__io_submit_flush_completions(ctx);
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
{
	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}

static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
	kasan_poison_object_data(req_cachep, req);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	struct io_tw_state ts = { .locked = true, };

	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &ts);
	if (WARN_ON_ONCE(!ts.locked))
		return;
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
}

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	xa_init(&ctx->io_bl_xa);

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets to not overconsume memory.
	 */
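	/*
	 * Worked example (illustrative only): with p->cq_entries == 4096,
	 * ilog2() is 12, so hash_bits becomes 7 and survives the clamp
	 * below, giving 128 buckets and roughly 4096 / 128 == 32 entries
	 * per bucket when the CQ is completely full.
	 */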
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
		goto err;
	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
		goto err;

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
		goto err;
	/* set invalid range, so io_import_fixed() fails meeting it */
	ctx->dummy_ubuf->ubuf = -1UL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    0, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	INIT_LIST_HEAD(&ctx->io_buffers_cache);
	io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
			    sizeof(struct io_rsrc_node));
	io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct async_poll));
	io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct io_async_msghdr));
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	init_waitqueue_head(&ctx->poll_wq);
	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
	spin_lock_init(&ctx->completion_lock);
	spin_lock_init(&ctx->timeout_lock);
	INIT_WQ_LIST(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->io_buffers_pages);
	INIT_LIST_HEAD(&ctx->io_buffers_comp);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->ltimeout_list);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	init_llist_head(&ctx->work_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	ctx->submit_state.free_list.next = NULL;
	INIT_WQ_LIST(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
	return ctx;
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	kfree(ctx->io_bl);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_put_kbuf_comp(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}
static inline void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&req->task->io_uring->inflight_tracked);
	}
}

static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!req->link))
		return NULL;

	req->flags &= ~REQ_F_ARM_LTIMEOUT;
	req->flags |= REQ_F_LINK_TIMEOUT;

	/* linked timeouts should have two refs once prep'ed */
	io_req_set_refcount(req);
	__io_req_set_refcount(req->link, 2);
	return req->link;
}

static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
		return NULL;
	return __io_prep_linked_timeout(req);
}

static noinline void __io_arm_ltimeout(struct io_kiocb *req)
{
	io_queue_linked_timeout(__io_prep_linked_timeout(req));
}

static inline void io_arm_ltimeout(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
		__io_arm_ltimeout(req);
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(req->file);

	if (req->file && (req->flags & REQ_F_ISREG)) {
		bool should_hash = def->hash_reg_file;

		/* don't serialize this request if the fs doesn't need it */
		if (should_hash && (req->file->f_flags & O_DIRECT) &&
		    (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE))
			should_hash = false;
		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
{
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		req->work.flags |= IO_WQ_WORK_CANCEL;

	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

static void io_eventfd_ops(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
	int ops = atomic_xchg(&ev_fd->ops, 0);

	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);

	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
	 * ordering in a race, but if references are 0 we know we have to free
	 * it regardless.
	 */
	if (atomic_dec_and_test(&ev_fd->refs)) {
		eventfd_ctx_put(ev_fd->cq_ev_fd);
		kfree(ev_fd);
	}
}

static void io_eventfd_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd = NULL;

	rcu_read_lock();
	/*
	 * rcu_dereference ctx->io_ev_fd once and use it both for checking
	 * and for eventfd_signal
	 */
	ev_fd = rcu_dereference(ctx->io_ev_fd);

	/*
	 * Check again if ev_fd exists in case an io_eventfd_unregister call
	 * completed between the NULL check of ctx->io_ev_fd at the start of
	 * the function and rcu_read_lock.
	 */
	if (unlikely(!ev_fd))
		goto out;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		goto out;
	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
		goto out;

	if (likely(eventfd_signal_allowed())) {
		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
	} else {
		atomic_inc(&ev_fd->refs);
		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
			call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
		else
			atomic_dec(&ev_fd->refs);
	}

out:
	rcu_read_unlock();
}

static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
{
	bool skip;

	spin_lock(&ctx->completion_lock);

	/*
	 * Eventfd should only get triggered when at least one event has been
	 * posted. Some applications rely on the eventfd notification count
	 * only changing IFF a new CQE has been added to the CQ ring. There's
	 * no dependency on a 1:1 relationship between how many times this
	 * function is called (and hence the eventfd count) and the number of
	 * CQEs posted to the CQ ring.
	 */
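	/*
	 * Illustrative note (no behavioural change): evfd_last_cq_tail is the
	 * CQ tail observed when the eventfd was last signalled, so the
	 * comparison below only lets a signal through when the tail has
	 * actually advanced, i.e. when new CQEs were posted since then.
	 */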
	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);
	if (skip)
		return;

	io_eventfd_signal(ctx);
}

void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->poll_activated)
		io_poll_wq_wake(ctx);
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active) {
		spin_lock(&ctx->completion_lock);
		io_queue_deferred(ctx);
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
		io_eventfd_flush_signal(ctx);
}

static inline void __io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	if (!ctx->task_complete)
		spin_lock(&ctx->completion_lock);
}

static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
{
	if (!ctx->task_complete)
		spin_unlock(&ctx->completion_lock);
}

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

static inline void io_cq_unlock(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	spin_unlock(&ctx->completion_lock);
}

/* keep it inlined for io_submit_flush_completions() */
static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	__io_cq_unlock(ctx);
	io_commit_cqring_flush(ctx);
	io_cqring_wake(ctx);
}

static void __io_cq_unlock_post_flush(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);

	if (ctx->task_complete) {
		/*
		 * ->task_complete implies that only current might be waiting
		 * for CQEs, and obviously, we currently don't. No one is
		 * waiting, wakeups are futile, skip them.
		 */
		io_commit_cqring_flush(ctx);
	} else {
		__io_cq_unlock(ctx);
		io_commit_cqring_flush(ctx);
		io_cqring_wake(ctx);
	}
}

void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_commit_cqring_flush(ctx);
	io_cqring_wake(ctx);
}

/* Returns true if there are no backlogged entries after the flush */
static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	struct io_overflow_cqe *ocqe;
	LIST_HEAD(list);

	io_cq_lock(ctx);
	list_splice_init(&ctx->cq_overflow_list, &list);
	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
	io_cq_unlock(ctx);

	while (!list_empty(&list)) {
		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
		list_del(&ocqe->list);
		kfree(ocqe);
	}
}

static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	size_t cqe_size = sizeof(struct io_uring_cqe);

	if (__io_cqring_events(ctx) == ctx->cq_entries)
		return;

	if (ctx->flags & IORING_SETUP_CQE32)
		cqe_size <<= 1;

	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
		struct io_overflow_cqe *ocqe;

		if (!cqe)
			break;
		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
		memcpy(cqe, &ocqe->cqe, cqe_size);
		list_del(&ocqe->list);
		kfree(ocqe);
	}

	if (list_empty(&ctx->cq_overflow_list)) {
		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	io_cq_unlock_post(ctx);
}

static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	/* iopoll syncs against uring_lock, not completion_lock */
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx);
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_unlock(&ctx->uring_lock);
}

static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
		io_cqring_do_overflow_flush(ctx);
}

/* can be called by any task */
static void io_put_task_remote(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_cancel)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}

/* used by a task to put its own references */
static void io_put_task_local(struct task_struct *task, int nr)
{
	task->io_uring->cached_refs += nr;
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		io_put_task_local(task, nr);
	else
		io_put_task_remote(task, nr);
}

void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

	percpu_counter_add(&tctx->inflight, refill);
	refcount_add(refill, &current->usage);
	tctx->cached_refs += refill;
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

	lockdep_assert_held(&ctx->completion_lock);

	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

bool io_req_cqe_overflow(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_CQE32_INIT)) {
		req->extra1 = 0;
		req->extra2 = 0;
	}
	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->extra1, req->extra2);
}

/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
 * fill the cq entry
 */
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
{
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;

	/*
	 * Posting into the CQ when there are pending overflowed CQEs may break
	 * ordering guarantees, which will affect links, F_MORE users and more.
	 * Force overflow the completion.
	 */
	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
		return NULL;

	/* userspace may cheat modifying the tail, be safe and do min */
	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
	free = ctx->cq_entries - queued;
	/* we need a contiguous range, limit based on the current array offset */
	len = min(free, ctx->cq_entries - off);
	if (!len)
		return NULL;

	if (ctx->flags & IORING_SETUP_CQE32) {
		off <<= 1;
		len <<= 1;
	}

	ctx->cqe_cached = &rings->cqes[off];
	ctx->cqe_sentinel = ctx->cqe_cached + len;

	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return &rings->cqes[off];
}

static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			    u32 cflags)
{
	struct io_uring_cqe *cqe;

	ctx->cq_extra++;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
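	/*
	 * Illustrative note: a NULL return from io_get_cqe() below is not a
	 * lost completion; callers such as __io_post_aux_cqe() react to the
	 * 'false' result by queueing the CQE onto cq_overflow_list via
	 * io_cqring_event_overflow() instead.
	 */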
	cqe = io_get_cqe(ctx);
	if (likely(cqe)) {
		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);

		if (ctx->flags & IORING_SETUP_CQE32) {
			WRITE_ONCE(cqe->big_cqe[0], 0);
			WRITE_ONCE(cqe->big_cqe[1], 0);
		}
		return true;
	}
	return false;
}

static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	unsigned int i;

	lockdep_assert_held(&ctx->uring_lock);
	for (i = 0; i < state->cqes_count; i++) {
		struct io_uring_cqe *cqe = &state->cqes[i];

		if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
			if (ctx->task_complete) {
				spin_lock(&ctx->completion_lock);
				io_cqring_event_overflow(ctx, cqe->user_data,
							 cqe->res, cqe->flags, 0, 0);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_cqring_event_overflow(ctx, cqe->user_data,
							 cqe->res, cqe->flags, 0, 0);
			}
		}
	}
	state->cqes_count = 0;
}

static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
			      bool allow_overflow)
{
	bool filled;

	io_cq_lock(ctx);
	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	if (!filled && allow_overflow)
		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);

	io_cq_unlock_post(ctx);
	return filled;
}

bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
}

bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
		bool allow_overflow)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 user_data = req->cqe.user_data;
	struct io_uring_cqe *cqe;

	if (!defer)
		return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);

	lockdep_assert_held(&ctx->uring_lock);

	if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->submit_state.cqes)) {
		__io_cq_lock(ctx);
		__io_flush_post_cqes(ctx);
		/* no need to flush - flush is deferred */
		__io_cq_unlock_post(ctx);
	}

	/*
	 * For deferred completions this is not as strict as it is otherwise,
	 * however its main job is to prevent unbounded posted completions,
	 * and in that it works just as well.
	 */
	if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
		return false;

	cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
	cqe->user_data = user_data;
	cqe->res = res;
	cqe->flags = cflags;
	return true;
}

static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *rsrc_node = NULL;

	io_cq_lock(ctx);
	if (!(req->flags & REQ_F_CQE_SKIP))
		io_fill_cqe_req(ctx, req);

	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (req_ref_put_and_test(req)) {
		if (req->flags & IO_REQ_LINK_FLAGS) {
			if (req->flags & IO_DISARM_MASK)
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_put_kbuf_comp(req);
		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
			io_clean_op(req);
		if (!(req->flags & REQ_F_FIXED_FILE))
			io_put_file(req->file);

		rsrc_node = req->rsrc_node;
		/*
		 * Selected buffer deallocation in io_clean_op() assumes that
		 * we don't hold ->completion_lock. Clean them here to avoid
		 * deadlocks.
		 */
		io_put_task_remote(req->task, 1);
		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
		ctx->locked_free_nr++;
	}
	io_cq_unlock_post(ctx);

	if (rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		io_put_rsrc_node(ctx, rsrc_node);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}

void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->ctx->task_complete && req->ctx->submitter_task != current) {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
		   !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		__io_req_complete_post(req, issue_flags);
	} else {
		struct io_ring_ctx *ctx = req->ctx;

		mutex_lock(&ctx->uring_lock);
		__io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
		mutex_unlock(&ctx->uring_lock);
	}
}

void io_req_defer_failed(struct io_kiocb *req, s32 res)
	__must_hold(&ctx->uring_lock)
{
	const struct io_cold_def *def = &io_cold_defs[req->opcode];

	lockdep_assert_held(&req->ctx->uring_lock);

	req_set_fail(req);
	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
	if (def->fail)
		def->fail(req);
	io_req_complete_defer(req);
}

/*
 * Don't initialise the fields below on every allocation, but do that in
 * advance and keep them valid across allocations.
 */
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	req->ctx = ctx;
	req->link = NULL;
	req->async_data = NULL;
	/* not necessary, but safer to zero */
	req->cqe.res = 0;
}

static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
					struct io_submit_state *state)
{
	spin_lock(&ctx->completion_lock);
	wq_list_splice(&ctx->locked_free_list, &state->free_list);
	ctx->locked_free_nr = 0;
	spin_unlock(&ctx->completion_lock);
}

/*
 * A request might get retired back into the request caches even before opcode
 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
 * Because of that, io_alloc_req() should be called only under ->uring_lock
 * and with extra caution to not get a request that is still worked on.
 */
__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	void *reqs[IO_REQ_ALLOC_BATCH];
	int ret, i;

	/*
	 * If we have more than a batch's worth of requests in our IRQ side
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
		if (!io_req_cache_empty(ctx))
			return true;
	}

	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);

	/*
	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
	 * retry single alloc to be on the safe side.
	 */
	if (unlikely(ret <= 0)) {
		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
		if (!reqs[0])
			return false;
		ret = 1;
	}

	percpu_ref_get_many(&ctx->refs, ret);
	for (i = 0; i < ret; i++) {
		struct io_kiocb *req = reqs[i];

		io_preinit_req(req, ctx);
		io_req_add_to_cache(req, ctx);
	}
	return true;
}

__cold void io_free_req(struct io_kiocb *req)
{
	/* refs were already put, restore them for io_req_task_complete() */
	req->flags &= ~REQ_F_REFCOUNT;
	/* we only want to free it, don't post CQEs */
	req->flags |= REQ_F_CQE_SKIP;
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

static void __io_req_find_next_prep(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock(&ctx->completion_lock);
	io_disarm_next(req);
	spin_unlock(&ctx->completion_lock);
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (unlikely(req->flags & IO_DISARM_MASK))
		__io_req_find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ctx)
		return;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
	if (ts->locked) {
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
		ts->locked = false;
	}
	percpu_ref_put(&ctx->refs);
}

static unsigned int handle_tw_list(struct llist_node *node,
				   struct io_ring_ctx **ctx,
				   struct io_tw_state *ts,
				   struct llist_node *last)
{
	unsigned int count = 0;

	while (node && node != last) {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		prefetch(container_of(next, struct io_kiocb, io_task_work.node));

		if (req->ctx != *ctx) {
			ctx_flush_and_put(*ctx, ts);
			*ctx = req->ctx;
			/* if not contended, grab and improve batching */
			ts->locked = mutex_trylock(&(*ctx)->uring_lock);
			percpu_ref_get(&(*ctx)->refs);
		}
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, ts);
		node = next;
		count++;
		if (unlikely(need_resched())) {
			ctx_flush_and_put(*ctx, ts);
			*ctx = NULL;
			cond_resched();
		}
	}

	return count;
}

/**
 * io_llist_xchg - swap all entries in a lock-less list
 * @head:	the head of lock-less list to delete all entries
 * @new:	new entry as the head of the list
 *
 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
 * The order of entries returned is from the newest to the oldest added one.
 */
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
					       struct llist_node *new)
{
	return xchg(&head->first, new);
}
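/*
 * Usage sketch (illustrative, mirroring tctx_task_work() further below): a
 * consumer can detach the whole pending list, leaving a sentinel behind, and
 * later retire that sentinel only if nothing new was queued in the meantime:
 *
 *	struct llist_node fake = {};
 *	struct llist_node *node;
 *
 *	node = io_llist_xchg(&tctx->task_list, &fake);	// grab list, leave sentinel
 *	// ... run entries from 'node' up to &fake ...
 *	node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
 *	// a result other than &fake means producers queued more work; loop again
 */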
/**
 * io_llist_cmpxchg - possibly swap all entries in a lock-less list
 * @head:	the head of lock-less list to delete all entries
 * @old:	expected old value of the first entry of the list
 * @new:	new entry as the head of the list
 *
 * perform a cmpxchg on the first entry of the list.
 */
static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
						  struct llist_node *old,
						  struct llist_node *new)
{
	return cmpxchg(&head->first, old, new);
}

void tctx_task_work(struct callback_head *cb)
{
	struct io_tw_state ts = {};
	struct io_ring_ctx *ctx = NULL;
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
						  task_work);
	struct llist_node fake = {};
	struct llist_node *node;
	unsigned int loops = 0;
	unsigned int count = 0;

	if (unlikely(current->flags & PF_EXITING)) {
		io_fallback_tw(tctx);
		return;
	}

	do {
		loops++;
		node = io_llist_xchg(&tctx->task_list, &fake);
		count += handle_tw_list(node, &ctx, &ts, &fake);

		/* skip expensive cmpxchg if there are items in the list */
		if (READ_ONCE(tctx->task_list.first) != &fake)
			continue;
		if (ts.locked && !wq_list_empty(&ctx->submit_state.compl_reqs)) {
			io_submit_flush_completions(ctx);
			if (READ_ONCE(tctx->task_list.first) != &fake)
				continue;
		}
		node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
	} while (node != &fake);

	ctx_flush_and_put(ctx, &ts);

	/* relaxed read is enough as only the task itself sets ->in_cancel */
	if (unlikely(atomic_read(&tctx->in_cancel)))
		io_uring_drop_tctx_refs(current);

	trace_io_uring_task_work_run(tctx, count, loops);
}

static __cold void io_fallback_tw(struct io_uring_task *tctx)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}
}

static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned nr_wait, nr_tw, nr_tw_prev;
	struct llist_node *first;

	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
		flags &= ~IOU_F_TWQ_LAZY_WAKE;

	first = READ_ONCE(ctx->work_llist.first);
	do {
		nr_tw_prev = 0;
		if (first) {
			struct io_kiocb *first_req = container_of(first,
							struct io_kiocb,
							io_task_work.node);
			/*
			 * Might be executed at any moment, rely on
			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
			 */
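			/*
			 * Descriptive note (illustrative, no behavioural
			 * change): nr_tw counts the work items queued on this
			 * ring's local list. With IOU_F_TWQ_LAZY_WAKE the
			 * waiter is only woken once that count reaches
			 * cq_wait_nr, the number of completions it asked to
			 * wait for; without it nr_tw is forced to -1U so the
			 * "not enough items" check below never suppresses the
			 * wakeup.
			 */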
			nr_tw_prev = READ_ONCE(first_req->nr_tw);
		}
		nr_tw = nr_tw_prev + 1;
		/* Large enough to fail the nr_wait comparison below */
		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
			nr_tw = -1U;

		req->nr_tw = nr_tw;
		req->io_task_work.node.next = first;
	} while (!try_cmpxchg(&ctx->work_llist.first, &first,
			      &req->io_task_work.node));

	if (!first) {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
		if (ctx->has_evfd)
			io_eventfd_signal(ctx);
	}

	nr_wait = atomic_read(&ctx->cq_wait_nr);
	/* no one is waiting */
	if (!nr_wait)
		return;
	/* either not enough or the previous add has already woken it up */
	if (nr_wait > nr_tw || nr_tw_prev >= nr_wait)
		return;
	/* pairs with set_current_state() in io_cqring_wait() */
	smp_mb__after_atomic();
	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	struct io_uring_task *tctx = req->task->io_uring;
	struct io_ring_ctx *ctx = req->ctx;

	if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
	    (ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
		rcu_read_lock();
		io_req_local_work_add(req, flags);
		rcu_read_unlock();
		return;
	}

	/* task_work already pending, we're done */
	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
		return;

	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
		return;

	io_fallback_tw(tctx);
}

static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
	struct llist_node *node;

	node = llist_del_all(&ctx->work_llist);
	while (node) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
		__io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
	}
}

static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	struct llist_node *node;
	unsigned int loops = 0;
	int ret = 0;

	if (WARN_ON_ONCE(ctx->submitter_task != current))
		return -EEXIST;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
	/*
	 * llists are in reverse order, flip it back the right way before
	 * running the pending items.
	 */
	node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
	while (node) {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);
		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, ts);
		ret++;
		node = next;
	}
	loops++;

	if (!llist_empty(&ctx->work_llist))
		goto again;
	if (ts->locked) {
		io_submit_flush_completions(ctx);
		if (!llist_empty(&ctx->work_llist))
			goto again;
	}
	trace_io_uring_local_work_run(ctx, ret, loops);
	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	struct io_tw_state ts = { .locked = true, };
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	ret = __io_run_local_work(ctx, &ts);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!ts.locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static int io_run_local_work(struct io_ring_ctx *ctx)
{
	struct io_tw_state ts = {};
	int ret;

	ts.locked = mutex_trylock(&ctx->uring_lock);
	ret = __io_run_local_work(ctx, &ts);
	if (ts.locked)
		mutex_unlock(&ctx->uring_lock);

	return ret;
}

static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	io_req_defer_failed(req, req->cqe.res);
}

void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		io_req_defer_failed(req, -EFAULT);
	else if (req->flags & REQ_F_FORCE_ASYNC)
		io_queue_iowq(req, ts);
	else
		io_queue_sqe(req);
}

void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	io_req_set_res(req, ret, 0);
	req->io_task_work.func = io_req_task_cancel;
	io_req_task_work_add(req);
}

void io_req_task_queue(struct io_kiocb *req)
{
	req->io_task_work.func = io_req_task_submit;
	io_req_task_work_add(req);
}

void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
	__must_hold(&ctx->uring_lock)
{
	struct task_struct *task = NULL;
	int task_refs = 0;

	do {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
			if (req->flags & REQ_F_REFCOUNT) {
				node = req->comp_list.next;
				if (!req_ref_put_and_test(req))
					continue;
			}
			if ((req->flags & REQ_F_POLLED) && req->apoll) {
				struct async_poll *apoll = req->apoll;

				if (apoll->double_poll)
					kfree(apoll->double_poll);
				if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
					kfree(apoll);
				req->flags &= ~REQ_F_POLLED;
			}
			if (req->flags & IO_REQ_LINK_FLAGS)
				io_queue_next(req);
			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
				io_clean_op(req);
		}
		if (!(req->flags & REQ_F_FIXED_FILE))
			io_put_file(req->file);

		io_req_put_rsrc_locked(req, ctx);

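		/*
		 * Descriptive note: task references are dropped in batches.
		 * The counter below is only flushed with io_put_task() when
		 * the owning task changes or once the whole list has been
		 * walked, instead of once per request.
		 */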
		if (req->task != task) {
			if (task)
				io_put_task(task, task_refs);
			task = req->task;
			task_refs = 0;
		}
		task_refs++;
		node = req->comp_list.next;
		io_req_add_to_cache(req, ctx);
	} while (node);

	if (task)
		io_put_task(task, task_refs);
}

static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_wq_work_node *node;

	__io_cq_lock(ctx);
	/* must come first to preserve CQE ordering in failure cases */
	if (state->cqes_count)
		__io_flush_post_cqes(ctx);
	__wq_list_for_each(node, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (!(req->flags & REQ_F_CQE_SKIP) &&
		    unlikely(!__io_fill_cqe_req(ctx, req))) {
			if (ctx->task_complete) {
				spin_lock(&ctx->completion_lock);
				io_req_cqe_overflow(req);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_req_cqe_overflow(req);
			}
		}
	}
	__io_cq_unlock_post_flush(ctx);

	if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
		io_free_batch_list(ctx, state->compl_reqs.first);
		INIT_WQ_LIST(&state->compl_reqs);
	}
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!wq_list_empty(&ctx->iopoll_list)) {
		/* let it sleep and repeat later if can't complete a request */
		if (io_do_iopoll(ctx, true) == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int ret = 0;
	unsigned long check_cq;

	if (!io_allowed_run_tw(ctx))
		return -EEXIST;

	check_cq = READ_ONCE(ctx->check_cq);
	if (unlikely(check_cq)) {
		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
			__io_cqring_overflow_flush(ctx);
		/*
		 * Similarly do not spin if we have not informed the user of any
		 * dropped CQE.
		 */
		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
			return -EBADR;
	}
	/*
	 * Don't enter poll loop if we already have events pending.
	 * If we do, we can potentially be spinning for commands that
	 * already triggered a CQE (eg in error).
	 */
	if (io_cqring_events(ctx))
		return 0;

	do {
		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (wq_list_empty(&ctx->iopoll_list) ||
		    io_task_work_pending(ctx)) {
			u32 tail = ctx->cached_cq_tail;

			(void) io_run_local_work_locked(ctx);

			if (task_work_pending(current) ||
			    wq_list_empty(&ctx->iopoll_list)) {
				mutex_unlock(&ctx->uring_lock);
				io_run_task_work();
				mutex_lock(&ctx->uring_lock);
			}
			/* some requests don't go through iopoll_list */
			if (tail != ctx->cached_cq_tail ||
			    wq_list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, !min);
		if (ret < 0)
			break;
		nr_events += ret;
		ret = 0;
	} while (nr_events < min && !need_resched());

	return ret;
}

void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	if (ts->locked)
		io_req_complete_defer(req);
	else
		io_req_complete_post(req, IO_URING_F_UNLOCKED);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

	/* workqueue context doesn't hold uring_lock, grab it now */
	if (unlikely(needs_lock))
		mutex_lock(&ctx->uring_lock);

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (wq_list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_queue = false;
	} else if (!ctx->poll_multi_queue) {
		struct io_kiocb *list_req;

		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
					comp_list);
		if (list_req->file != req->file)
			ctx->poll_multi_queue = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
	else
		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);

	if (unlikely(needs_lock)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in sq thread task context or in io worker task context. If
		 * the current task context is the sq thread, we don't need to
		 * check whether we should wake up the sq thread.
		 */
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
}

unsigned int io_file_get_flags(struct file *file)
{
	unsigned int res = 0;

	if (S_ISREG(file_inode(file)->i_mode))
		res |= REQ_F_ISREG;
	if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
		res |= REQ_F_SUPPORT_NOWAIT;
	return res;
}

bool io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
	if (req->async_data) {
		req->flags |= REQ_F_ASYNC_DATA;
		return false;
	}
	return true;
}

int io_req_prep_async(struct io_kiocb *req)
{
	const struct io_cold_def *cdef = &io_cold_defs[req->opcode];
	const struct io_issue_def *def = &io_issue_defs[req->opcode];

	/* assign early for deferred execution for non-fixed file */
	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
		req->file = io_file_get_normal(req, req->cqe.fd);
	if (!cdef->prep_async)
		return 0;
	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (!def->manual_alloc) {
		if (io_alloc_async_data(req))
			return -EAGAIN;
	}
	return cdef->prep_async(req);
}

static u32 io_get_sequence(struct io_kiocb *req)
{
	u32 seq = req->ctx->cached_sq_head;
	struct io_kiocb *cur;

	/* need original cached_sq_head, but it was increased for each req */
	io_for_each_link(cur, req)
		seq--;
	return seq;
}

static __cold void io_drain_req(struct io_kiocb *req)
	__must_hold(&ctx->uring_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_defer_entry *de;
	int ret;
	u32 seq = io_get_sequence(req);

	/* Still need defer if there is pending req in defer list. */
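	/*
	 * Descriptive note: 'seq' is the submission sequence this request had
	 * when it entered the ring (io_get_sequence() walks the link to undo
	 * the per-request cached_sq_head increments). req_need_defer() keeps
	 * the request parked until seq + cq_extra catches up with
	 * cached_cq_tail, i.e. until everything submitted before it has
	 * completed.
	 */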
	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
queue:
		ctx->drain_active = false;
		io_req_task_queue(req);
		return;
	}
	spin_unlock(&ctx->completion_lock);

	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
		io_req_defer_failed(req, ret);
		return;
	}

	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
		kfree(de);
		goto queue;
	}

	trace_io_uring_defer(req);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock(&ctx->completion_lock);
}

static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
			   unsigned int issue_flags)
{
	if (req->file || !def->needs_file)
		return true;

	if (req->flags & REQ_F_FIXED_FILE)
		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
	else
		req->file = io_file_get_normal(req, req->cqe.fd);

	return !!req->file;
}

static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	const struct cred *creds = NULL;
	int ret;

	if (unlikely(!io_assign_file(req, def, issue_flags)))
		return -EBADF;

	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
		creds = override_creds(req->creds);

	if (!def->audit_skip)
		audit_uring_entry(req->opcode);

	ret = def->issue(req, issue_flags);

	if (!def->audit_skip)
		audit_uring_exit(!ret, ret);

	if (creds)
		revert_creds(creds);

	if (ret == IOU_OK) {
		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
			io_req_complete_defer(req);
		else
			io_req_complete_post(req, issue_flags);
	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
		return ret;

	/* If the op doesn't have a file, we're not polling for it */
	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
		io_iopoll_req_issued(req, issue_flags);

	return 0;
}

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
				 IO_URING_F_COMPLETE_DEFER);
}

struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (req_ref_put_and_test(req)) {
		if (req->flags & IO_REQ_LINK_FLAGS)
			nxt = io_req_find_next(req);
		io_free_req(req);
	}
&nxt->work : NULL; 1928 } 1929 1930 void io_wq_submit_work(struct io_wq_work *work) 1931 { 1932 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 1933 const struct io_issue_def *def = &io_issue_defs[req->opcode]; 1934 unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ; 1935 bool needs_poll = false; 1936 int ret = 0, err = -ECANCELED; 1937 1938 /* one will be dropped by ->io_wq_free_work() after returning to io-wq */ 1939 if (!(req->flags & REQ_F_REFCOUNT)) 1940 __io_req_set_refcount(req, 2); 1941 else 1942 req_ref_get(req); 1943 1944 io_arm_ltimeout(req); 1945 1946 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ 1947 if (work->flags & IO_WQ_WORK_CANCEL) { 1948 fail: 1949 io_req_task_queue_fail(req, err); 1950 return; 1951 } 1952 if (!io_assign_file(req, def, issue_flags)) { 1953 err = -EBADF; 1954 work->flags |= IO_WQ_WORK_CANCEL; 1955 goto fail; 1956 } 1957 1958 if (req->flags & REQ_F_FORCE_ASYNC) { 1959 bool opcode_poll = def->pollin || def->pollout; 1960 1961 if (opcode_poll && file_can_poll(req->file)) { 1962 needs_poll = true; 1963 issue_flags |= IO_URING_F_NONBLOCK; 1964 } 1965 } 1966 1967 do { 1968 ret = io_issue_sqe(req, issue_flags); 1969 if (ret != -EAGAIN) 1970 break; 1971 /* 1972 * We can get EAGAIN for iopolled IO even though we're 1973 * forcing a sync submission from here, since we can't 1974 * wait for request slots on the block side. 1975 */ 1976 if (!needs_poll) { 1977 if (!(req->ctx->flags & IORING_SETUP_IOPOLL)) 1978 break; 1979 cond_resched(); 1980 continue; 1981 } 1982 1983 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK) 1984 return; 1985 /* aborted or ready, in either case retry blocking */ 1986 needs_poll = false; 1987 issue_flags &= ~IO_URING_F_NONBLOCK; 1988 } while (1); 1989 1990 /* avoid locking problems by failing it from a clean context */ 1991 if (ret < 0) 1992 io_req_task_queue_fail(req, ret); 1993 } 1994 1995 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, 1996 unsigned int issue_flags) 1997 { 1998 struct io_ring_ctx *ctx = req->ctx; 1999 struct io_fixed_file *slot; 2000 struct file *file = NULL; 2001 2002 io_ring_submit_lock(ctx, issue_flags); 2003 2004 if (unlikely((unsigned int)fd >= ctx->nr_user_files)) 2005 goto out; 2006 fd = array_index_nospec(fd, ctx->nr_user_files); 2007 slot = io_fixed_file_slot(&ctx->file_table, fd); 2008 file = io_slot_file(slot); 2009 req->flags |= io_slot_flags(slot); 2010 io_req_set_rsrc_node(req, ctx, 0); 2011 out: 2012 io_ring_submit_unlock(ctx, issue_flags); 2013 return file; 2014 } 2015 2016 struct file *io_file_get_normal(struct io_kiocb *req, int fd) 2017 { 2018 struct file *file = fget(fd); 2019 2020 trace_io_uring_file_get(req, fd); 2021 2022 /* we don't allow fixed io_uring files */ 2023 if (file && io_is_uring_fops(file)) 2024 io_req_track_inflight(req); 2025 return file; 2026 } 2027 2028 static void io_queue_async(struct io_kiocb *req, int ret) 2029 __must_hold(&req->ctx->uring_lock) 2030 { 2031 struct io_kiocb *linked_timeout; 2032 2033 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { 2034 io_req_defer_failed(req, ret); 2035 return; 2036 } 2037 2038 linked_timeout = io_prep_linked_timeout(req); 2039 2040 switch (io_arm_poll_handler(req, 0)) { 2041 case IO_APOLL_READY: 2042 io_kbuf_recycle(req, 0); 2043 io_req_task_queue(req); 2044 break; 2045 case IO_APOLL_ABORTED: 2046 io_kbuf_recycle(req, 0); 2047 io_queue_iowq(req, NULL); 2048 break; 2049 case IO_APOLL_OK: 2050 break; 2051 } 2052 2053 if (linked_timeout) 2054 
io_queue_linked_timeout(linked_timeout); 2055 } 2056 2057 static inline void io_queue_sqe(struct io_kiocb *req) 2058 __must_hold(&req->ctx->uring_lock) 2059 { 2060 int ret; 2061 2062 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); 2063 2064 /* 2065 * We async punt it if the file wasn't marked NOWAIT, or if the file 2066 * doesn't support non-blocking read/write attempts 2067 */ 2068 if (likely(!ret)) 2069 io_arm_ltimeout(req); 2070 else 2071 io_queue_async(req, ret); 2072 } 2073 2074 static void io_queue_sqe_fallback(struct io_kiocb *req) 2075 __must_hold(&req->ctx->uring_lock) 2076 { 2077 if (unlikely(req->flags & REQ_F_FAIL)) { 2078 /* 2079 * We don't submit, fail them all, for that replace hardlinks 2080 * with normal links. Extra REQ_F_LINK is tolerated. 2081 */ 2082 req->flags &= ~REQ_F_HARDLINK; 2083 req->flags |= REQ_F_LINK; 2084 io_req_defer_failed(req, req->cqe.res); 2085 } else { 2086 int ret = io_req_prep_async(req); 2087 2088 if (unlikely(ret)) { 2089 io_req_defer_failed(req, ret); 2090 return; 2091 } 2092 2093 if (unlikely(req->ctx->drain_active)) 2094 io_drain_req(req); 2095 else 2096 io_queue_iowq(req, NULL); 2097 } 2098 } 2099 2100 /* 2101 * Check SQE restrictions (opcode and flags). 2102 * 2103 * Returns 'true' if SQE is allowed, 'false' otherwise. 2104 */ 2105 static inline bool io_check_restriction(struct io_ring_ctx *ctx, 2106 struct io_kiocb *req, 2107 unsigned int sqe_flags) 2108 { 2109 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) 2110 return false; 2111 2112 if ((sqe_flags & ctx->restrictions.sqe_flags_required) != 2113 ctx->restrictions.sqe_flags_required) 2114 return false; 2115 2116 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | 2117 ctx->restrictions.sqe_flags_required)) 2118 return false; 2119 2120 return true; 2121 } 2122 2123 static void io_init_req_drain(struct io_kiocb *req) 2124 { 2125 struct io_ring_ctx *ctx = req->ctx; 2126 struct io_kiocb *head = ctx->submit_state.link.head; 2127 2128 ctx->drain_active = true; 2129 if (head) { 2130 /* 2131 * If we need to drain a request in the middle of a link, drain 2132 * the head request and the next request/link after the current 2133 * link. Considering sequential execution of links, 2134 * REQ_F_IO_DRAIN will be maintained for every request of our 2135 * link. 
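 *
 * Purely as an illustration (userspace view, not kernel code): with a
 * link such as
 *
 *   sqe0: IOSQE_IO_LINK
 *   sqe1: IOSQE_IO_LINK | IOSQE_IO_DRAIN
 *   sqe2: end of link
 *
 * the drain is accounted against the head (sqe0) below, and since the
 * chain runs strictly in order, the drain semantics effectively cover
 * every member of the link as well as the submission that follows it.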
2136 */ 2137 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; 2138 ctx->drain_next = true; 2139 } 2140 } 2141 2142 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, 2143 const struct io_uring_sqe *sqe) 2144 __must_hold(&ctx->uring_lock) 2145 { 2146 const struct io_issue_def *def; 2147 unsigned int sqe_flags; 2148 int personality; 2149 u8 opcode; 2150 2151 /* req is partially pre-initialised, see io_preinit_req() */ 2152 req->opcode = opcode = READ_ONCE(sqe->opcode); 2153 /* same numerical values with corresponding REQ_F_*, safe to copy */ 2154 req->flags = sqe_flags = READ_ONCE(sqe->flags); 2155 req->cqe.user_data = READ_ONCE(sqe->user_data); 2156 req->file = NULL; 2157 req->rsrc_node = NULL; 2158 req->task = current; 2159 2160 if (unlikely(opcode >= IORING_OP_LAST)) { 2161 req->opcode = 0; 2162 return -EINVAL; 2163 } 2164 def = &io_issue_defs[opcode]; 2165 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { 2166 /* enforce forwards compatibility on users */ 2167 if (sqe_flags & ~SQE_VALID_FLAGS) 2168 return -EINVAL; 2169 if (sqe_flags & IOSQE_BUFFER_SELECT) { 2170 if (!def->buffer_select) 2171 return -EOPNOTSUPP; 2172 req->buf_index = READ_ONCE(sqe->buf_group); 2173 } 2174 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS) 2175 ctx->drain_disabled = true; 2176 if (sqe_flags & IOSQE_IO_DRAIN) { 2177 if (ctx->drain_disabled) 2178 return -EOPNOTSUPP; 2179 io_init_req_drain(req); 2180 } 2181 } 2182 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { 2183 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) 2184 return -EACCES; 2185 /* knock it to the slow queue path, will be drained there */ 2186 if (ctx->drain_active) 2187 req->flags |= REQ_F_FORCE_ASYNC; 2188 /* if there is no link, we're at "next" request and need to drain */ 2189 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { 2190 ctx->drain_next = false; 2191 ctx->drain_active = true; 2192 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; 2193 } 2194 } 2195 2196 if (!def->ioprio && sqe->ioprio) 2197 return -EINVAL; 2198 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL)) 2199 return -EINVAL; 2200 2201 if (def->needs_file) { 2202 struct io_submit_state *state = &ctx->submit_state; 2203 2204 req->cqe.fd = READ_ONCE(sqe->fd); 2205 2206 /* 2207 * Plug now if we have more than 2 IO left after this, and the 2208 * target is potentially a read/write to block based storage. 2209 */ 2210 if (state->need_plug && def->plug) { 2211 state->plug_started = true; 2212 state->need_plug = false; 2213 blk_start_plug_nr_ios(&state->plug, state->submit_nr); 2214 } 2215 } 2216 2217 personality = READ_ONCE(sqe->personality); 2218 if (personality) { 2219 int ret; 2220 2221 req->creds = xa_load(&ctx->personalities, personality); 2222 if (!req->creds) 2223 return -EINVAL; 2224 get_cred(req->creds); 2225 ret = security_uring_override_creds(req->creds); 2226 if (ret) { 2227 put_cred(req->creds); 2228 return ret; 2229 } 2230 req->flags |= REQ_F_CREDS; 2231 } 2232 2233 return def->prep(req, sqe); 2234 } 2235 2236 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, 2237 struct io_kiocb *req, int ret) 2238 { 2239 struct io_ring_ctx *ctx = req->ctx; 2240 struct io_submit_link *link = &ctx->submit_state.link; 2241 struct io_kiocb *head = link->head; 2242 2243 trace_io_uring_req_failed(sqe, req, ret); 2244 2245 /* 2246 * Avoid breaking links in the middle as it renders links with SQPOLL 2247 * unusable. 
Instead of failing eagerly, continue assembling the link if 2248 * applicable and mark the head with REQ_F_FAIL. The link flushing code 2249 * should find the flag and handle the rest. 2250 */ 2251 req_fail_link_node(req, ret); 2252 if (head && !(head->flags & REQ_F_FAIL)) 2253 req_fail_link_node(head, -ECANCELED); 2254 2255 if (!(req->flags & IO_REQ_LINK_FLAGS)) { 2256 if (head) { 2257 link->last->link = req; 2258 link->head = NULL; 2259 req = head; 2260 } 2261 io_queue_sqe_fallback(req); 2262 return ret; 2263 } 2264 2265 if (head) 2266 link->last->link = req; 2267 else 2268 link->head = req; 2269 link->last = req; 2270 return 0; 2271 } 2272 2273 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, 2274 const struct io_uring_sqe *sqe) 2275 __must_hold(&ctx->uring_lock) 2276 { 2277 struct io_submit_link *link = &ctx->submit_state.link; 2278 int ret; 2279 2280 ret = io_init_req(ctx, req, sqe); 2281 if (unlikely(ret)) 2282 return io_submit_fail_init(sqe, req, ret); 2283 2284 trace_io_uring_submit_req(req); 2285 2286 /* 2287 * If we already have a head request, queue this one for async 2288 * submittal once the head completes. If we don't have a head but 2289 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be 2290 * submitted sync once the chain is complete. If none of those 2291 * conditions are true (normal request), then just queue it. 2292 */ 2293 if (unlikely(link->head)) { 2294 ret = io_req_prep_async(req); 2295 if (unlikely(ret)) 2296 return io_submit_fail_init(sqe, req, ret); 2297 2298 trace_io_uring_link(req, link->head); 2299 link->last->link = req; 2300 link->last = req; 2301 2302 if (req->flags & IO_REQ_LINK_FLAGS) 2303 return 0; 2304 /* last request of the link, flush it */ 2305 req = link->head; 2306 link->head = NULL; 2307 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) 2308 goto fallback; 2309 2310 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | 2311 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { 2312 if (req->flags & IO_REQ_LINK_FLAGS) { 2313 link->head = req; 2314 link->last = req; 2315 } else { 2316 fallback: 2317 io_queue_sqe_fallback(req); 2318 } 2319 return 0; 2320 } 2321 2322 io_queue_sqe(req); 2323 return 0; 2324 } 2325 2326 /* 2327 * Batched submission is done, ensure local IO is flushed out. 2328 */ 2329 static void io_submit_state_end(struct io_ring_ctx *ctx) 2330 { 2331 struct io_submit_state *state = &ctx->submit_state; 2332 2333 if (unlikely(state->link.head)) 2334 io_queue_sqe_fallback(state->link.head); 2335 /* flush only after queuing links as they can generate completions */ 2336 io_submit_flush_completions(ctx); 2337 if (state->plug_started) 2338 blk_finish_plug(&state->plug); 2339 } 2340 2341 /* 2342 * Start submission side cache. 2343 */ 2344 static void io_submit_state_start(struct io_submit_state *state, 2345 unsigned int max_ios) 2346 { 2347 state->plug_started = false; 2348 state->need_plug = max_ios > 2; 2349 state->submit_nr = max_ios; 2350 /* set only head, no need to init link_last in advance */ 2351 state->link.head = NULL; 2352 } 2353 2354 static void io_commit_sqring(struct io_ring_ctx *ctx) 2355 { 2356 struct io_rings *rings = ctx->rings; 2357 2358 /* 2359 * Ensure any loads from the SQEs are done at this point, 2360 * since once we write the new head, the application could 2361 * write new data to them. 2362 */ 2363 smp_store_release(&rings->sq.head, ctx->cached_sq_head); 2364 } 2365 2366 /* 2367 * Fetch an sqe, if one is available. 
Note this returns a pointer to memory 2368 * that is mapped by userspace. This means that care needs to be taken to 2369 * ensure that reads are stable, as we cannot rely on userspace always 2370 * being a good citizen. If members of the sqe are validated and then later 2371 * used, it's important that those reads are done through READ_ONCE() to 2372 * prevent a re-load down the line. 2373 */ 2374 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) 2375 { 2376 unsigned head, mask = ctx->sq_entries - 1; 2377 unsigned sq_idx = ctx->cached_sq_head++ & mask; 2378 2379 /* 2380 * The cached sq head (or cq tail) serves two purposes: 2381 * 2382 * 1) allows us to batch the cost of updating the user visible 2383 * head updates. 2384 * 2) allows the kernel side to track the head on its own, even 2385 * though the application is the one updating it. 2386 */ 2387 head = READ_ONCE(ctx->sq_array[sq_idx]); 2388 if (likely(head < ctx->sq_entries)) { 2389 /* double index for 128-byte SQEs, twice as long */ 2390 if (ctx->flags & IORING_SETUP_SQE128) 2391 head <<= 1; 2392 *sqe = &ctx->sq_sqes[head]; 2393 return true; 2394 } 2395 2396 /* drop invalid entries */ 2397 ctx->cq_extra--; 2398 WRITE_ONCE(ctx->rings->sq_dropped, 2399 READ_ONCE(ctx->rings->sq_dropped) + 1); 2400 return false; 2401 } 2402 2403 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) 2404 __must_hold(&ctx->uring_lock) 2405 { 2406 unsigned int entries = io_sqring_entries(ctx); 2407 unsigned int left; 2408 int ret; 2409 2410 if (unlikely(!entries)) 2411 return 0; 2412 /* make sure SQ entry isn't read before tail */ 2413 ret = left = min(nr, entries); 2414 io_get_task_refs(left); 2415 io_submit_state_start(&ctx->submit_state, left); 2416 2417 do { 2418 const struct io_uring_sqe *sqe; 2419 struct io_kiocb *req; 2420 2421 if (unlikely(!io_alloc_req(ctx, &req))) 2422 break; 2423 if (unlikely(!io_get_sqe(ctx, &sqe))) { 2424 io_req_add_to_cache(req, ctx); 2425 break; 2426 } 2427 2428 /* 2429 * Continue submitting even for sqe failure if the 2430 * ring was setup with IORING_SETUP_SUBMIT_ALL 2431 */ 2432 if (unlikely(io_submit_sqe(ctx, req, sqe)) && 2433 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { 2434 left--; 2435 break; 2436 } 2437 } while (--left); 2438 2439 if (unlikely(left)) { 2440 ret -= left; 2441 /* try again if it submitted nothing and can't allocate a req */ 2442 if (!ret && io_req_cache_empty(ctx)) 2443 ret = -EAGAIN; 2444 current->io_uring->cached_refs += left; 2445 } 2446 2447 io_submit_state_end(ctx); 2448 /* Commit SQ ring head once we've consumed and submitted all SQEs */ 2449 io_commit_sqring(ctx); 2450 return ret; 2451 } 2452 2453 struct io_wait_queue { 2454 struct wait_queue_entry wq; 2455 struct io_ring_ctx *ctx; 2456 unsigned cq_tail; 2457 unsigned nr_timeouts; 2458 ktime_t timeout; 2459 }; 2460 2461 static inline bool io_has_work(struct io_ring_ctx *ctx) 2462 { 2463 return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) || 2464 !llist_empty(&ctx->work_llist); 2465 } 2466 2467 static inline bool io_should_wake(struct io_wait_queue *iowq) 2468 { 2469 struct io_ring_ctx *ctx = iowq->ctx; 2470 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail; 2471 2472 /* 2473 * Wake up if we have enough events, or if a timeout occurred since we 2474 * started waiting. For timeouts, we always want to return to userspace, 2475 * regardless of event count. 
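 *
 * Hedged numerical sketch of the signed-distance check below (values
 * invented for illustration): iowq->cq_tail is the CQ tail value we
 * are waiting for the ring to reach. With 32-bit wrapping ring
 * counters, say cq_tail == 0xfffffffe while the ring tail has since
 * wrapped around to 0x00000001; the subtraction yields 3 when
 * interpreted as a signed int, so dist >= 0 correctly reports that
 * enough completions have arrived, where a plain unsigned comparison
 * of the raw counters would not.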
2476 */ 2477 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; 2478 } 2479 2480 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, 2481 int wake_flags, void *key) 2482 { 2483 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); 2484 2485 /* 2486 * Cannot safely flush overflowed CQEs from here, ensure we wake up 2487 * the task, and the next invocation will do it. 2488 */ 2489 if (io_should_wake(iowq) || io_has_work(iowq->ctx)) 2490 return autoremove_wake_function(curr, mode, wake_flags, key); 2491 return -1; 2492 } 2493 2494 int io_run_task_work_sig(struct io_ring_ctx *ctx) 2495 { 2496 if (!llist_empty(&ctx->work_llist)) { 2497 __set_current_state(TASK_RUNNING); 2498 if (io_run_local_work(ctx) > 0) 2499 return 1; 2500 } 2501 if (io_run_task_work() > 0) 2502 return 1; 2503 if (task_sigpending(current)) 2504 return -EINTR; 2505 return 0; 2506 } 2507 2508 /* when returns >0, the caller should retry */ 2509 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, 2510 struct io_wait_queue *iowq) 2511 { 2512 if (unlikely(READ_ONCE(ctx->check_cq))) 2513 return 1; 2514 if (unlikely(!llist_empty(&ctx->work_llist))) 2515 return 1; 2516 if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) 2517 return 1; 2518 if (unlikely(task_sigpending(current))) 2519 return -EINTR; 2520 if (unlikely(io_should_wake(iowq))) 2521 return 0; 2522 if (iowq->timeout == KTIME_MAX) 2523 schedule(); 2524 else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS)) 2525 return -ETIME; 2526 return 0; 2527 } 2528 2529 /* 2530 * Wait until events become available, if we don't already have some. The 2531 * application must reap them itself, as they reside on the shared cq ring. 2532 */ 2533 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, 2534 const sigset_t __user *sig, size_t sigsz, 2535 struct __kernel_timespec __user *uts) 2536 { 2537 struct io_wait_queue iowq; 2538 struct io_rings *rings = ctx->rings; 2539 int ret; 2540 2541 if (!io_allowed_run_tw(ctx)) 2542 return -EEXIST; 2543 if (!llist_empty(&ctx->work_llist)) 2544 io_run_local_work(ctx); 2545 io_run_task_work(); 2546 io_cqring_overflow_flush(ctx); 2547 /* if user messes with these they will just get an early return */ 2548 if (__io_cqring_events_user(ctx) >= min_events) 2549 return 0; 2550 2551 if (sig) { 2552 #ifdef CONFIG_COMPAT 2553 if (in_compat_syscall()) 2554 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, 2555 sigsz); 2556 else 2557 #endif 2558 ret = set_user_sigmask(sig, sigsz); 2559 2560 if (ret) 2561 return ret; 2562 } 2563 2564 init_waitqueue_func_entry(&iowq.wq, io_wake_function); 2565 iowq.wq.private = current; 2566 INIT_LIST_HEAD(&iowq.wq.entry); 2567 iowq.ctx = ctx; 2568 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); 2569 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; 2570 iowq.timeout = KTIME_MAX; 2571 2572 if (uts) { 2573 struct timespec64 ts; 2574 2575 if (get_timespec64(&ts, uts)) 2576 return -EFAULT; 2577 iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); 2578 } 2579 2580 trace_io_uring_cqring_wait(ctx, min_events); 2581 do { 2582 unsigned long check_cq; 2583 2584 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 2585 int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail); 2586 2587 atomic_set(&ctx->cq_wait_nr, nr_wait); 2588 set_current_state(TASK_INTERRUPTIBLE); 2589 } else { 2590 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, 2591 TASK_INTERRUPTIBLE); 2592 } 2593 2594 ret = 
io_cqring_wait_schedule(ctx, &iowq); 2595 __set_current_state(TASK_RUNNING); 2596 atomic_set(&ctx->cq_wait_nr, 0); 2597 2598 if (ret < 0) 2599 break; 2600 /* 2601 * Run task_work after scheduling and before io_should_wake(). 2602 * If we got woken because of task_work being processed, run it 2603 * now rather than let the caller do another wait loop. 2604 */ 2605 io_run_task_work(); 2606 if (!llist_empty(&ctx->work_llist)) 2607 io_run_local_work(ctx); 2608 2609 check_cq = READ_ONCE(ctx->check_cq); 2610 if (unlikely(check_cq)) { 2611 /* let the caller flush overflows, retry */ 2612 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) 2613 io_cqring_do_overflow_flush(ctx); 2614 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) { 2615 ret = -EBADR; 2616 break; 2617 } 2618 } 2619 2620 if (io_should_wake(&iowq)) { 2621 ret = 0; 2622 break; 2623 } 2624 cond_resched(); 2625 } while (1); 2626 2627 if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) 2628 finish_wait(&ctx->cq_wait, &iowq.wq); 2629 restore_saved_sigmask_unless(ret == -EINTR); 2630 2631 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; 2632 } 2633 2634 static void io_mem_free(void *ptr) 2635 { 2636 struct page *page; 2637 2638 if (!ptr) 2639 return; 2640 2641 page = virt_to_head_page(ptr); 2642 if (put_page_testzero(page)) 2643 free_compound_page(page); 2644 } 2645 2646 static void io_pages_free(struct page ***pages, int npages) 2647 { 2648 struct page **page_array; 2649 int i; 2650 2651 if (!pages) 2652 return; 2653 page_array = *pages; 2654 for (i = 0; i < npages; i++) 2655 unpin_user_page(page_array[i]); 2656 kvfree(page_array); 2657 *pages = NULL; 2658 } 2659 2660 static void *__io_uaddr_map(struct page ***pages, unsigned short *npages, 2661 unsigned long uaddr, size_t size) 2662 { 2663 struct page **page_array; 2664 unsigned int nr_pages; 2665 int ret; 2666 2667 *npages = 0; 2668 2669 if (uaddr & (PAGE_SIZE - 1) || !size) 2670 return ERR_PTR(-EINVAL); 2671 2672 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2673 if (nr_pages > USHRT_MAX) 2674 return ERR_PTR(-EINVAL); 2675 page_array = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); 2676 if (!page_array) 2677 return ERR_PTR(-ENOMEM); 2678 2679 ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, 2680 page_array); 2681 if (ret != nr_pages) { 2682 err: 2683 io_pages_free(&page_array, ret > 0 ? ret : 0); 2684 return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT); 2685 } 2686 /* 2687 * Should be a single page. If the ring is small enough that we can 2688 * use a normal page, that is fine. If we need multiple pages, then 2689 * userspace should use a huge page. That's the only way to guarantee 2690 * that we get contigious memory, outside of just being lucky or 2691 * (currently) having low memory fragmentation. 
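 *
 * Rough userspace sketch for the IORING_SETUP_NO_MMAP case (assumed
 * pattern, error handling omitted): backing the rings with a huge page
 * is the straightforward way to satisfy the single-compound-page
 * expectation, e.g.
 *
 *   void *mem = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *   p.flags |= IORING_SETUP_NO_MMAP;
 *   p.cq_off.user_addr = (unsigned long long)mem;
 *
 * with a second allocation passed via p.sq_off.user_addr for the SQE
 * array, p being the struct io_uring_params handed to io_uring_setup().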
2692 */ 2693 if (page_array[0] != page_array[ret - 1]) 2694 goto err; 2695 *pages = page_array; 2696 *npages = nr_pages; 2697 return page_to_virt(page_array[0]); 2698 } 2699 2700 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, 2701 size_t size) 2702 { 2703 return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr, 2704 size); 2705 } 2706 2707 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, 2708 size_t size) 2709 { 2710 return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr, 2711 size); 2712 } 2713 2714 static void io_rings_free(struct io_ring_ctx *ctx) 2715 { 2716 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) { 2717 io_mem_free(ctx->rings); 2718 io_mem_free(ctx->sq_sqes); 2719 ctx->rings = NULL; 2720 ctx->sq_sqes = NULL; 2721 } else { 2722 io_pages_free(&ctx->ring_pages, ctx->n_ring_pages); 2723 io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages); 2724 } 2725 } 2726 2727 static void *io_mem_alloc(size_t size) 2728 { 2729 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; 2730 void *ret; 2731 2732 ret = (void *) __get_free_pages(gfp, get_order(size)); 2733 if (ret) 2734 return ret; 2735 return ERR_PTR(-ENOMEM); 2736 } 2737 2738 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, 2739 unsigned int cq_entries, size_t *sq_offset) 2740 { 2741 struct io_rings *rings; 2742 size_t off, sq_array_size; 2743 2744 off = struct_size(rings, cqes, cq_entries); 2745 if (off == SIZE_MAX) 2746 return SIZE_MAX; 2747 if (ctx->flags & IORING_SETUP_CQE32) { 2748 if (check_shl_overflow(off, 1, &off)) 2749 return SIZE_MAX; 2750 } 2751 2752 #ifdef CONFIG_SMP 2753 off = ALIGN(off, SMP_CACHE_BYTES); 2754 if (off == 0) 2755 return SIZE_MAX; 2756 #endif 2757 2758 if (sq_offset) 2759 *sq_offset = off; 2760 2761 sq_array_size = array_size(sizeof(u32), sq_entries); 2762 if (sq_array_size == SIZE_MAX) 2763 return SIZE_MAX; 2764 2765 if (check_add_overflow(off, sq_array_size, &off)) 2766 return SIZE_MAX; 2767 2768 return off; 2769 } 2770 2771 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, 2772 unsigned int eventfd_async) 2773 { 2774 struct io_ev_fd *ev_fd; 2775 __s32 __user *fds = arg; 2776 int fd; 2777 2778 ev_fd = rcu_dereference_protected(ctx->io_ev_fd, 2779 lockdep_is_held(&ctx->uring_lock)); 2780 if (ev_fd) 2781 return -EBUSY; 2782 2783 if (copy_from_user(&fd, fds, sizeof(*fds))) 2784 return -EFAULT; 2785 2786 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL); 2787 if (!ev_fd) 2788 return -ENOMEM; 2789 2790 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd); 2791 if (IS_ERR(ev_fd->cq_ev_fd)) { 2792 int ret = PTR_ERR(ev_fd->cq_ev_fd); 2793 kfree(ev_fd); 2794 return ret; 2795 } 2796 2797 spin_lock(&ctx->completion_lock); 2798 ctx->evfd_last_cq_tail = ctx->cached_cq_tail; 2799 spin_unlock(&ctx->completion_lock); 2800 2801 ev_fd->eventfd_async = eventfd_async; 2802 ctx->has_evfd = true; 2803 rcu_assign_pointer(ctx->io_ev_fd, ev_fd); 2804 atomic_set(&ev_fd->refs, 1); 2805 atomic_set(&ev_fd->ops, 0); 2806 return 0; 2807 } 2808 2809 static int io_eventfd_unregister(struct io_ring_ctx *ctx) 2810 { 2811 struct io_ev_fd *ev_fd; 2812 2813 ev_fd = rcu_dereference_protected(ctx->io_ev_fd, 2814 lockdep_is_held(&ctx->uring_lock)); 2815 if (ev_fd) { 2816 ctx->has_evfd = false; 2817 rcu_assign_pointer(ctx->io_ev_fd, NULL); 2818 if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops)) 2819 call_rcu(&ev_fd->rcu, io_eventfd_ops); 2820 return 0; 2821 } 2822 2823 return -ENXIO; 2824 } 2825 2826 static void 
io_req_caches_free(struct io_ring_ctx *ctx) 2827 { 2828 struct io_kiocb *req; 2829 int nr = 0; 2830 2831 mutex_lock(&ctx->uring_lock); 2832 io_flush_cached_locked_reqs(ctx, &ctx->submit_state); 2833 2834 while (!io_req_cache_empty(ctx)) { 2835 req = io_extract_req(ctx); 2836 kmem_cache_free(req_cachep, req); 2837 nr++; 2838 } 2839 if (nr) 2840 percpu_ref_put_many(&ctx->refs, nr); 2841 mutex_unlock(&ctx->uring_lock); 2842 } 2843 2844 static void io_rsrc_node_cache_free(struct io_cache_entry *entry) 2845 { 2846 kfree(container_of(entry, struct io_rsrc_node, cache)); 2847 } 2848 2849 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) 2850 { 2851 io_sq_thread_finish(ctx); 2852 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ 2853 if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list))) 2854 return; 2855 2856 mutex_lock(&ctx->uring_lock); 2857 if (ctx->buf_data) 2858 __io_sqe_buffers_unregister(ctx); 2859 if (ctx->file_data) 2860 __io_sqe_files_unregister(ctx); 2861 io_cqring_overflow_kill(ctx); 2862 io_eventfd_unregister(ctx); 2863 io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free); 2864 io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); 2865 io_destroy_buffers(ctx); 2866 mutex_unlock(&ctx->uring_lock); 2867 if (ctx->sq_creds) 2868 put_cred(ctx->sq_creds); 2869 if (ctx->submitter_task) 2870 put_task_struct(ctx->submitter_task); 2871 2872 /* there are no registered resources left, nobody uses it */ 2873 if (ctx->rsrc_node) 2874 io_rsrc_node_destroy(ctx, ctx->rsrc_node); 2875 2876 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); 2877 2878 #if defined(CONFIG_UNIX) 2879 if (ctx->ring_sock) { 2880 ctx->ring_sock->file = NULL; /* so that iput() is called */ 2881 sock_release(ctx->ring_sock); 2882 } 2883 #endif 2884 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); 2885 2886 io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free); 2887 if (ctx->mm_account) { 2888 mmdrop(ctx->mm_account); 2889 ctx->mm_account = NULL; 2890 } 2891 io_rings_free(ctx); 2892 2893 percpu_ref_exit(&ctx->refs); 2894 free_uid(ctx->user); 2895 io_req_caches_free(ctx); 2896 if (ctx->hash_map) 2897 io_wq_put_hash(ctx->hash_map); 2898 kfree(ctx->cancel_table.hbs); 2899 kfree(ctx->cancel_table_locked.hbs); 2900 kfree(ctx->dummy_ubuf); 2901 kfree(ctx->io_bl); 2902 xa_destroy(&ctx->io_bl_xa); 2903 kfree(ctx); 2904 } 2905 2906 static __cold void io_activate_pollwq_cb(struct callback_head *cb) 2907 { 2908 struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx, 2909 poll_wq_task_work); 2910 2911 mutex_lock(&ctx->uring_lock); 2912 ctx->poll_activated = true; 2913 mutex_unlock(&ctx->uring_lock); 2914 2915 /* 2916 * Wake ups for some events between start of polling and activation 2917 * might've been lost due to loose synchronisation. 
2918 */ 2919 wake_up_all(&ctx->poll_wq); 2920 percpu_ref_put(&ctx->refs); 2921 } 2922 2923 static __cold void io_activate_pollwq(struct io_ring_ctx *ctx) 2924 { 2925 spin_lock(&ctx->completion_lock); 2926 /* already activated or in progress */ 2927 if (ctx->poll_activated || ctx->poll_wq_task_work.func) 2928 goto out; 2929 if (WARN_ON_ONCE(!ctx->task_complete)) 2930 goto out; 2931 if (!ctx->submitter_task) 2932 goto out; 2933 /* 2934 * with ->submitter_task only the submitter task completes requests, we 2935 * only need to sync with it, which is done by injecting a tw 2936 */ 2937 init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb); 2938 percpu_ref_get(&ctx->refs); 2939 if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL)) 2940 percpu_ref_put(&ctx->refs); 2941 out: 2942 spin_unlock(&ctx->completion_lock); 2943 } 2944 2945 static __poll_t io_uring_poll(struct file *file, poll_table *wait) 2946 { 2947 struct io_ring_ctx *ctx = file->private_data; 2948 __poll_t mask = 0; 2949 2950 if (unlikely(!ctx->poll_activated)) 2951 io_activate_pollwq(ctx); 2952 2953 poll_wait(file, &ctx->poll_wq, wait); 2954 /* 2955 * synchronizes with barrier from wq_has_sleeper call in 2956 * io_commit_cqring 2957 */ 2958 smp_rmb(); 2959 if (!io_sqring_full(ctx)) 2960 mask |= EPOLLOUT | EPOLLWRNORM; 2961 2962 /* 2963 * Don't flush cqring overflow list here, just do a simple check. 2964 * Otherwise there could possible be ABBA deadlock: 2965 * CPU0 CPU1 2966 * ---- ---- 2967 * lock(&ctx->uring_lock); 2968 * lock(&ep->mtx); 2969 * lock(&ctx->uring_lock); 2970 * lock(&ep->mtx); 2971 * 2972 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this 2973 * pushes them to do the flush. 2974 */ 2975 2976 if (__io_cqring_events_user(ctx) || io_has_work(ctx)) 2977 mask |= EPOLLIN | EPOLLRDNORM; 2978 2979 return mask; 2980 } 2981 2982 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) 2983 { 2984 const struct cred *creds; 2985 2986 creds = xa_erase(&ctx->personalities, id); 2987 if (creds) { 2988 put_cred(creds); 2989 return 0; 2990 } 2991 2992 return -EINVAL; 2993 } 2994 2995 struct io_tctx_exit { 2996 struct callback_head task_work; 2997 struct completion completion; 2998 struct io_ring_ctx *ctx; 2999 }; 3000 3001 static __cold void io_tctx_exit_cb(struct callback_head *cb) 3002 { 3003 struct io_uring_task *tctx = current->io_uring; 3004 struct io_tctx_exit *work; 3005 3006 work = container_of(cb, struct io_tctx_exit, task_work); 3007 /* 3008 * When @in_cancel, we're in cancellation and it's racy to remove the 3009 * node. It'll be removed by the end of cancellation, just ignore it. 3010 * tctx can be NULL if the queueing of this task_work raced with 3011 * work cancelation off the exec path. 
3012 */ 3013 if (tctx && !atomic_read(&tctx->in_cancel)) 3014 io_uring_del_tctx_node((unsigned long)work->ctx); 3015 complete(&work->completion); 3016 } 3017 3018 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) 3019 { 3020 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 3021 3022 return req->ctx == data; 3023 } 3024 3025 static __cold void io_ring_exit_work(struct work_struct *work) 3026 { 3027 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); 3028 unsigned long timeout = jiffies + HZ * 60 * 5; 3029 unsigned long interval = HZ / 20; 3030 struct io_tctx_exit exit; 3031 struct io_tctx_node *node; 3032 int ret; 3033 3034 /* 3035 * If we're doing polled IO and end up having requests being 3036 * submitted async (out-of-line), then completions can come in while 3037 * we're waiting for refs to drop. We need to reap these manually, 3038 * as nobody else will be looking for them. 3039 */ 3040 do { 3041 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { 3042 mutex_lock(&ctx->uring_lock); 3043 io_cqring_overflow_kill(ctx); 3044 mutex_unlock(&ctx->uring_lock); 3045 } 3046 3047 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3048 io_move_task_work_from_local(ctx); 3049 3050 while (io_uring_try_cancel_requests(ctx, NULL, true)) 3051 cond_resched(); 3052 3053 if (ctx->sq_data) { 3054 struct io_sq_data *sqd = ctx->sq_data; 3055 struct task_struct *tsk; 3056 3057 io_sq_thread_park(sqd); 3058 tsk = sqd->thread; 3059 if (tsk && tsk->io_uring && tsk->io_uring->io_wq) 3060 io_wq_cancel_cb(tsk->io_uring->io_wq, 3061 io_cancel_ctx_cb, ctx, true); 3062 io_sq_thread_unpark(sqd); 3063 } 3064 3065 io_req_caches_free(ctx); 3066 3067 if (WARN_ON_ONCE(time_after(jiffies, timeout))) { 3068 /* there is little hope left, don't run it too often */ 3069 interval = HZ * 60; 3070 } 3071 /* 3072 * This is really an uninterruptible wait, as it has to be 3073 * complete. But it's also run from a kworker, which doesn't 3074 * take signals, so it's fine to make it interruptible. This 3075 * avoids scenarios where we knowingly can wait much longer 3076 * on completions, for example if someone does a SIGSTOP on 3077 * a task that needs to finish task_work to make this loop 3078 * complete. That's a synthetic situation that should not 3079 * cause a stuck task backtrace, and hence a potential panic 3080 * on stuck tasks if that is enabled. 3081 */ 3082 } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval)); 3083 3084 init_completion(&exit.completion); 3085 init_task_work(&exit.task_work, io_tctx_exit_cb); 3086 exit.ctx = ctx; 3087 /* 3088 * Some may use context even when all refs and requests have been put, 3089 * and they are free to do so while still holding uring_lock or 3090 * completion_lock, see io_req_task_submit(). Apart from other work, 3091 * this lock/unlock section also waits them to finish. 3092 */ 3093 mutex_lock(&ctx->uring_lock); 3094 while (!list_empty(&ctx->tctx_list)) { 3095 WARN_ON_ONCE(time_after(jiffies, timeout)); 3096 3097 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, 3098 ctx_node); 3099 /* don't spin on a single task if cancellation failed */ 3100 list_rotate_left(&ctx->tctx_list); 3101 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); 3102 if (WARN_ON_ONCE(ret)) 3103 continue; 3104 3105 mutex_unlock(&ctx->uring_lock); 3106 /* 3107 * See comment above for 3108 * wait_for_completion_interruptible_timeout() on why this 3109 * wait is marked as interruptible. 
3110 */ 3111 wait_for_completion_interruptible(&exit.completion); 3112 mutex_lock(&ctx->uring_lock); 3113 } 3114 mutex_unlock(&ctx->uring_lock); 3115 spin_lock(&ctx->completion_lock); 3116 spin_unlock(&ctx->completion_lock); 3117 3118 /* pairs with RCU read section in io_req_local_work_add() */ 3119 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3120 synchronize_rcu(); 3121 3122 io_ring_ctx_free(ctx); 3123 } 3124 3125 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) 3126 { 3127 unsigned long index; 3128 struct creds *creds; 3129 3130 mutex_lock(&ctx->uring_lock); 3131 percpu_ref_kill(&ctx->refs); 3132 xa_for_each(&ctx->personalities, index, creds) 3133 io_unregister_personality(ctx, index); 3134 if (ctx->rings) 3135 io_poll_remove_all(ctx, NULL, true); 3136 mutex_unlock(&ctx->uring_lock); 3137 3138 /* 3139 * If we failed setting up the ctx, we might not have any rings 3140 * and therefore did not submit any requests 3141 */ 3142 if (ctx->rings) 3143 io_kill_timeouts(ctx, NULL, true); 3144 3145 INIT_WORK(&ctx->exit_work, io_ring_exit_work); 3146 /* 3147 * Use system_unbound_wq to avoid spawning tons of event kworkers 3148 * if we're exiting a ton of rings at the same time. It just adds 3149 * noise and overhead, there's no discernable change in runtime 3150 * over using system_wq. 3151 */ 3152 queue_work(system_unbound_wq, &ctx->exit_work); 3153 } 3154 3155 static int io_uring_release(struct inode *inode, struct file *file) 3156 { 3157 struct io_ring_ctx *ctx = file->private_data; 3158 3159 file->private_data = NULL; 3160 io_ring_ctx_wait_and_kill(ctx); 3161 return 0; 3162 } 3163 3164 struct io_task_cancel { 3165 struct task_struct *task; 3166 bool all; 3167 }; 3168 3169 static bool io_cancel_task_cb(struct io_wq_work *work, void *data) 3170 { 3171 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 3172 struct io_task_cancel *cancel = data; 3173 3174 return io_match_task_safe(req, cancel->task, cancel->all); 3175 } 3176 3177 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, 3178 struct task_struct *task, 3179 bool cancel_all) 3180 { 3181 struct io_defer_entry *de; 3182 LIST_HEAD(list); 3183 3184 spin_lock(&ctx->completion_lock); 3185 list_for_each_entry_reverse(de, &ctx->defer_list, list) { 3186 if (io_match_task_safe(de->req, task, cancel_all)) { 3187 list_cut_position(&list, &ctx->defer_list, &de->list); 3188 break; 3189 } 3190 } 3191 spin_unlock(&ctx->completion_lock); 3192 if (list_empty(&list)) 3193 return false; 3194 3195 while (!list_empty(&list)) { 3196 de = list_first_entry(&list, struct io_defer_entry, list); 3197 list_del_init(&de->list); 3198 io_req_task_queue_fail(de->req, -ECANCELED); 3199 kfree(de); 3200 } 3201 return true; 3202 } 3203 3204 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) 3205 { 3206 struct io_tctx_node *node; 3207 enum io_wq_cancel cret; 3208 bool ret = false; 3209 3210 mutex_lock(&ctx->uring_lock); 3211 list_for_each_entry(node, &ctx->tctx_list, ctx_node) { 3212 struct io_uring_task *tctx = node->task->io_uring; 3213 3214 /* 3215 * io_wq will stay alive while we hold uring_lock, because it's 3216 * killed after ctx nodes, which requires to take the lock. 
3217 */ 3218 if (!tctx || !tctx->io_wq) 3219 continue; 3220 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); 3221 ret |= (cret != IO_WQ_CANCEL_NOTFOUND); 3222 } 3223 mutex_unlock(&ctx->uring_lock); 3224 3225 return ret; 3226 } 3227 3228 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, 3229 struct task_struct *task, 3230 bool cancel_all) 3231 { 3232 struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; 3233 struct io_uring_task *tctx = task ? task->io_uring : NULL; 3234 enum io_wq_cancel cret; 3235 bool ret = false; 3236 3237 /* set it so io_req_local_work_add() would wake us up */ 3238 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 3239 atomic_set(&ctx->cq_wait_nr, 1); 3240 smp_mb(); 3241 } 3242 3243 /* failed during ring init, it couldn't have issued any requests */ 3244 if (!ctx->rings) 3245 return false; 3246 3247 if (!task) { 3248 ret |= io_uring_try_cancel_iowq(ctx); 3249 } else if (tctx && tctx->io_wq) { 3250 /* 3251 * Cancels requests of all rings, not only @ctx, but 3252 * it's fine as the task is in exit/exec. 3253 */ 3254 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, 3255 &cancel, true); 3256 ret |= (cret != IO_WQ_CANCEL_NOTFOUND); 3257 } 3258 3259 /* SQPOLL thread does its own polling */ 3260 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || 3261 (ctx->sq_data && ctx->sq_data->thread == current)) { 3262 while (!wq_list_empty(&ctx->iopoll_list)) { 3263 io_iopoll_try_reap_events(ctx); 3264 ret = true; 3265 cond_resched(); 3266 } 3267 } 3268 3269 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && 3270 io_allowed_defer_tw_run(ctx)) 3271 ret |= io_run_local_work(ctx) > 0; 3272 ret |= io_cancel_defer_files(ctx, task, cancel_all); 3273 mutex_lock(&ctx->uring_lock); 3274 ret |= io_poll_remove_all(ctx, task, cancel_all); 3275 mutex_unlock(&ctx->uring_lock); 3276 ret |= io_kill_timeouts(ctx, task, cancel_all); 3277 if (task) 3278 ret |= io_run_task_work() > 0; 3279 return ret; 3280 } 3281 3282 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) 3283 { 3284 if (tracked) 3285 return atomic_read(&tctx->inflight_tracked); 3286 return percpu_counter_sum(&tctx->inflight); 3287 } 3288 3289 /* 3290 * Find any io_uring ctx that this task has registered or done IO on, and cancel 3291 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation. 
3292 */ 3293 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) 3294 { 3295 struct io_uring_task *tctx = current->io_uring; 3296 struct io_ring_ctx *ctx; 3297 struct io_tctx_node *node; 3298 unsigned long index; 3299 s64 inflight; 3300 DEFINE_WAIT(wait); 3301 3302 WARN_ON_ONCE(sqd && sqd->thread != current); 3303 3304 if (!current->io_uring) 3305 return; 3306 if (tctx->io_wq) 3307 io_wq_exit_start(tctx->io_wq); 3308 3309 atomic_inc(&tctx->in_cancel); 3310 do { 3311 bool loop = false; 3312 3313 io_uring_drop_tctx_refs(current); 3314 /* read completions before cancelations */ 3315 inflight = tctx_inflight(tctx, !cancel_all); 3316 if (!inflight) 3317 break; 3318 3319 if (!sqd) { 3320 xa_for_each(&tctx->xa, index, node) { 3321 /* sqpoll task will cancel all its requests */ 3322 if (node->ctx->sq_data) 3323 continue; 3324 loop |= io_uring_try_cancel_requests(node->ctx, 3325 current, cancel_all); 3326 } 3327 } else { 3328 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 3329 loop |= io_uring_try_cancel_requests(ctx, 3330 current, 3331 cancel_all); 3332 } 3333 3334 if (loop) { 3335 cond_resched(); 3336 continue; 3337 } 3338 3339 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); 3340 io_run_task_work(); 3341 io_uring_drop_tctx_refs(current); 3342 xa_for_each(&tctx->xa, index, node) { 3343 if (!llist_empty(&node->ctx->work_llist)) { 3344 WARN_ON_ONCE(node->ctx->submitter_task && 3345 node->ctx->submitter_task != current); 3346 goto end_wait; 3347 } 3348 } 3349 /* 3350 * If we've seen completions, retry without waiting. This 3351 * avoids a race where a completion comes in before we did 3352 * prepare_to_wait(). 3353 */ 3354 if (inflight == tctx_inflight(tctx, !cancel_all)) 3355 schedule(); 3356 end_wait: 3357 finish_wait(&tctx->wait, &wait); 3358 } while (1); 3359 3360 io_uring_clean_tctx(tctx); 3361 if (cancel_all) { 3362 /* 3363 * We shouldn't run task_works after cancel, so just leave 3364 * ->in_cancel set for normal exit. 
3365 */ 3366 atomic_dec(&tctx->in_cancel); 3367 /* for exec all current's requests should be gone, kill tctx */ 3368 __io_uring_free(current); 3369 } 3370 } 3371 3372 void __io_uring_cancel(bool cancel_all) 3373 { 3374 io_uring_cancel_generic(cancel_all, NULL); 3375 } 3376 3377 static void *io_uring_validate_mmap_request(struct file *file, 3378 loff_t pgoff, size_t sz) 3379 { 3380 struct io_ring_ctx *ctx = file->private_data; 3381 loff_t offset = pgoff << PAGE_SHIFT; 3382 struct page *page; 3383 void *ptr; 3384 3385 /* Don't allow mmap if the ring was setup without it */ 3386 if (ctx->flags & IORING_SETUP_NO_MMAP) 3387 return ERR_PTR(-EINVAL); 3388 3389 switch (offset & IORING_OFF_MMAP_MASK) { 3390 case IORING_OFF_SQ_RING: 3391 case IORING_OFF_CQ_RING: 3392 ptr = ctx->rings; 3393 break; 3394 case IORING_OFF_SQES: 3395 ptr = ctx->sq_sqes; 3396 break; 3397 case IORING_OFF_PBUF_RING: { 3398 unsigned int bgid; 3399 3400 bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; 3401 mutex_lock(&ctx->uring_lock); 3402 ptr = io_pbuf_get_address(ctx, bgid); 3403 mutex_unlock(&ctx->uring_lock); 3404 if (!ptr) 3405 return ERR_PTR(-EINVAL); 3406 break; 3407 } 3408 default: 3409 return ERR_PTR(-EINVAL); 3410 } 3411 3412 page = virt_to_head_page(ptr); 3413 if (sz > page_size(page)) 3414 return ERR_PTR(-EINVAL); 3415 3416 return ptr; 3417 } 3418 3419 #ifdef CONFIG_MMU 3420 3421 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) 3422 { 3423 size_t sz = vma->vm_end - vma->vm_start; 3424 unsigned long pfn; 3425 void *ptr; 3426 3427 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); 3428 if (IS_ERR(ptr)) 3429 return PTR_ERR(ptr); 3430 3431 pfn = virt_to_phys(ptr) >> PAGE_SHIFT; 3432 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); 3433 } 3434 3435 static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, 3436 unsigned long addr, unsigned long len, 3437 unsigned long pgoff, unsigned long flags) 3438 { 3439 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 3440 struct vm_unmapped_area_info info; 3441 void *ptr; 3442 3443 /* 3444 * Do not allow to map to user-provided address to avoid breaking the 3445 * aliasing rules. Userspace is not able to guess the offset address of 3446 * kernel kmalloc()ed memory area. 3447 */ 3448 if (addr) 3449 return -EINVAL; 3450 3451 ptr = io_uring_validate_mmap_request(filp, pgoff, len); 3452 if (IS_ERR(ptr)) 3453 return -ENOMEM; 3454 3455 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 3456 info.length = len; 3457 info.low_limit = max(PAGE_SIZE, mmap_min_addr); 3458 info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); 3459 #ifdef SHM_COLOUR 3460 info.align_mask = PAGE_MASK & (SHM_COLOUR - 1UL); 3461 #else 3462 info.align_mask = PAGE_MASK & (SHMLBA - 1UL); 3463 #endif 3464 info.align_offset = (unsigned long) ptr; 3465 3466 /* 3467 * A failed mmap() very likely causes application failure, 3468 * so fall back to the bottom-up function here. This scenario 3469 * can happen with large stack limits and large mmap() 3470 * allocations. 3471 */ 3472 addr = vm_unmapped_area(&info); 3473 if (offset_in_page(addr)) { 3474 info.flags = 0; 3475 info.low_limit = TASK_UNMAPPED_BASE; 3476 info.high_limit = mmap_end; 3477 addr = vm_unmapped_area(&info); 3478 } 3479 3480 return addr; 3481 } 3482 3483 #else /* !CONFIG_MMU */ 3484 3485 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) 3486 { 3487 return is_nommu_shared_mapping(vma->vm_flags) ? 
0 : -EINVAL; 3488 } 3489 3490 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file) 3491 { 3492 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE; 3493 } 3494 3495 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, 3496 unsigned long addr, unsigned long len, 3497 unsigned long pgoff, unsigned long flags) 3498 { 3499 void *ptr; 3500 3501 ptr = io_uring_validate_mmap_request(file, pgoff, len); 3502 if (IS_ERR(ptr)) 3503 return PTR_ERR(ptr); 3504 3505 return (unsigned long) ptr; 3506 } 3507 3508 #endif /* !CONFIG_MMU */ 3509 3510 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz) 3511 { 3512 if (flags & IORING_ENTER_EXT_ARG) { 3513 struct io_uring_getevents_arg arg; 3514 3515 if (argsz != sizeof(arg)) 3516 return -EINVAL; 3517 if (copy_from_user(&arg, argp, sizeof(arg))) 3518 return -EFAULT; 3519 } 3520 return 0; 3521 } 3522 3523 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, 3524 struct __kernel_timespec __user **ts, 3525 const sigset_t __user **sig) 3526 { 3527 struct io_uring_getevents_arg arg; 3528 3529 /* 3530 * If EXT_ARG isn't set, then we have no timespec and the argp pointer 3531 * is just a pointer to the sigset_t. 3532 */ 3533 if (!(flags & IORING_ENTER_EXT_ARG)) { 3534 *sig = (const sigset_t __user *) argp; 3535 *ts = NULL; 3536 return 0; 3537 } 3538 3539 /* 3540 * EXT_ARG is set - ensure we agree on the size of it and copy in our 3541 * timespec and sigset_t pointers if good. 3542 */ 3543 if (*argsz != sizeof(arg)) 3544 return -EINVAL; 3545 if (copy_from_user(&arg, argp, sizeof(arg))) 3546 return -EFAULT; 3547 if (arg.pad) 3548 return -EINVAL; 3549 *sig = u64_to_user_ptr(arg.sigmask); 3550 *argsz = arg.sigmask_sz; 3551 *ts = u64_to_user_ptr(arg.ts); 3552 return 0; 3553 } 3554 3555 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, 3556 u32, min_complete, u32, flags, const void __user *, argp, 3557 size_t, argsz) 3558 { 3559 struct io_ring_ctx *ctx; 3560 struct fd f; 3561 long ret; 3562 3563 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | 3564 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | 3565 IORING_ENTER_REGISTERED_RING))) 3566 return -EINVAL; 3567 3568 /* 3569 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we 3570 * need only dereference our task private array to find it. 3571 */ 3572 if (flags & IORING_ENTER_REGISTERED_RING) { 3573 struct io_uring_task *tctx = current->io_uring; 3574 3575 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) 3576 return -EINVAL; 3577 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); 3578 f.file = tctx->registered_rings[fd]; 3579 f.flags = 0; 3580 if (unlikely(!f.file)) 3581 return -EBADF; 3582 } else { 3583 f = fdget(fd); 3584 if (unlikely(!f.file)) 3585 return -EBADF; 3586 ret = -EOPNOTSUPP; 3587 if (unlikely(!io_is_uring_fops(f.file))) 3588 goto out; 3589 } 3590 3591 ctx = f.file->private_data; 3592 ret = -EBADFD; 3593 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) 3594 goto out; 3595 3596 /* 3597 * For SQ polling, the thread will do all submissions and completions. 3598 * Just return the requested submit count, and wake the thread if 3599 * we were asked to. 
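 *
 * Illustrative userspace pattern (a sketch only; io_uring_enter()
 * stands for the raw syscall and memory-ordering details are omitted):
 * an SQPOLL application normally just fills SQEs and advances the SQ
 * tail, and only enters the kernel to wait for completions or when the
 * poll thread has gone idle and raised IORING_SQ_NEED_WAKEUP in the SQ
 * flags, e.g.
 *
 *   if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *           io_uring_enter(ring_fd, to_submit, 0,
 *                          IORING_ENTER_SQ_WAKEUP, NULL, 0);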
3600 */ 3601 ret = 0; 3602 if (ctx->flags & IORING_SETUP_SQPOLL) { 3603 io_cqring_overflow_flush(ctx); 3604 3605 if (unlikely(ctx->sq_data->thread == NULL)) { 3606 ret = -EOWNERDEAD; 3607 goto out; 3608 } 3609 if (flags & IORING_ENTER_SQ_WAKEUP) 3610 wake_up(&ctx->sq_data->wait); 3611 if (flags & IORING_ENTER_SQ_WAIT) 3612 io_sqpoll_wait_sq(ctx); 3613 3614 ret = to_submit; 3615 } else if (to_submit) { 3616 ret = io_uring_add_tctx_node(ctx); 3617 if (unlikely(ret)) 3618 goto out; 3619 3620 mutex_lock(&ctx->uring_lock); 3621 ret = io_submit_sqes(ctx, to_submit); 3622 if (ret != to_submit) { 3623 mutex_unlock(&ctx->uring_lock); 3624 goto out; 3625 } 3626 if (flags & IORING_ENTER_GETEVENTS) { 3627 if (ctx->syscall_iopoll) 3628 goto iopoll_locked; 3629 /* 3630 * Ignore errors, we'll soon call io_cqring_wait() and 3631 * it should handle ownership problems if any. 3632 */ 3633 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3634 (void)io_run_local_work_locked(ctx); 3635 } 3636 mutex_unlock(&ctx->uring_lock); 3637 } 3638 3639 if (flags & IORING_ENTER_GETEVENTS) { 3640 int ret2; 3641 3642 if (ctx->syscall_iopoll) { 3643 /* 3644 * We disallow the app entering submit/complete with 3645 * polling, but we still need to lock the ring to 3646 * prevent racing with polled issue that got punted to 3647 * a workqueue. 3648 */ 3649 mutex_lock(&ctx->uring_lock); 3650 iopoll_locked: 3651 ret2 = io_validate_ext_arg(flags, argp, argsz); 3652 if (likely(!ret2)) { 3653 min_complete = min(min_complete, 3654 ctx->cq_entries); 3655 ret2 = io_iopoll_check(ctx, min_complete); 3656 } 3657 mutex_unlock(&ctx->uring_lock); 3658 } else { 3659 const sigset_t __user *sig; 3660 struct __kernel_timespec __user *ts; 3661 3662 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); 3663 if (likely(!ret2)) { 3664 min_complete = min(min_complete, 3665 ctx->cq_entries); 3666 ret2 = io_cqring_wait(ctx, min_complete, sig, 3667 argsz, ts); 3668 } 3669 } 3670 3671 if (!ret) { 3672 ret = ret2; 3673 3674 /* 3675 * EBADR indicates that one or more CQE were dropped. 3676 * Once the user has been informed we can clear the bit 3677 * as they are obviously ok with those drops. 
3678 */ 3679 if (unlikely(ret2 == -EBADR)) 3680 clear_bit(IO_CHECK_CQ_DROPPED_BIT, 3681 &ctx->check_cq); 3682 } 3683 } 3684 out: 3685 fdput(f); 3686 return ret; 3687 } 3688 3689 static const struct file_operations io_uring_fops = { 3690 .release = io_uring_release, 3691 .mmap = io_uring_mmap, 3692 #ifndef CONFIG_MMU 3693 .get_unmapped_area = io_uring_nommu_get_unmapped_area, 3694 .mmap_capabilities = io_uring_nommu_mmap_capabilities, 3695 #else 3696 .get_unmapped_area = io_uring_mmu_get_unmapped_area, 3697 #endif 3698 .poll = io_uring_poll, 3699 #ifdef CONFIG_PROC_FS 3700 .show_fdinfo = io_uring_show_fdinfo, 3701 #endif 3702 }; 3703 3704 bool io_is_uring_fops(struct file *file) 3705 { 3706 return file->f_op == &io_uring_fops; 3707 } 3708 3709 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, 3710 struct io_uring_params *p) 3711 { 3712 struct io_rings *rings; 3713 size_t size, sq_array_offset; 3714 void *ptr; 3715 3716 /* make sure these are sane, as we already accounted them */ 3717 ctx->sq_entries = p->sq_entries; 3718 ctx->cq_entries = p->cq_entries; 3719 3720 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset); 3721 if (size == SIZE_MAX) 3722 return -EOVERFLOW; 3723 3724 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3725 rings = io_mem_alloc(size); 3726 else 3727 rings = io_rings_map(ctx, p->cq_off.user_addr, size); 3728 3729 if (IS_ERR(rings)) 3730 return PTR_ERR(rings); 3731 3732 ctx->rings = rings; 3733 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); 3734 rings->sq_ring_mask = p->sq_entries - 1; 3735 rings->cq_ring_mask = p->cq_entries - 1; 3736 rings->sq_ring_entries = p->sq_entries; 3737 rings->cq_ring_entries = p->cq_entries; 3738 3739 if (p->flags & IORING_SETUP_SQE128) 3740 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); 3741 else 3742 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); 3743 if (size == SIZE_MAX) { 3744 io_rings_free(ctx); 3745 return -EOVERFLOW; 3746 } 3747 3748 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3749 ptr = io_mem_alloc(size); 3750 else 3751 ptr = io_sqes_map(ctx, p->sq_off.user_addr, size); 3752 3753 if (IS_ERR(ptr)) { 3754 io_rings_free(ctx); 3755 return PTR_ERR(ptr); 3756 } 3757 3758 ctx->sq_sqes = ptr; 3759 return 0; 3760 } 3761 3762 static int io_uring_install_fd(struct file *file) 3763 { 3764 int fd; 3765 3766 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); 3767 if (fd < 0) 3768 return fd; 3769 fd_install(fd, file); 3770 return fd; 3771 } 3772 3773 /* 3774 * Allocate an anonymous fd, this is what constitutes the application 3775 * visible backing of an io_uring instance. The application mmaps this 3776 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled, 3777 * we have to tie this fd to a socket for file garbage collection purposes. 
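 *
 * For illustration only (userspace sketch, error handling omitted):
 * the fd returned to the application is typically mapped with the
 * IORING_OFF_* offsets from the uapi header, e.g.
 *
 *   sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *   sqes    = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * where the sizes are derived from the offsets and entry counts that
 * io_uring_setup() reports back in struct io_uring_params.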
3778 */ 3779 static struct file *io_uring_get_file(struct io_ring_ctx *ctx) 3780 { 3781 struct file *file; 3782 #if defined(CONFIG_UNIX) 3783 int ret; 3784 3785 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP, 3786 &ctx->ring_sock); 3787 if (ret) 3788 return ERR_PTR(ret); 3789 #endif 3790 3791 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx, 3792 O_RDWR | O_CLOEXEC, NULL); 3793 #if defined(CONFIG_UNIX) 3794 if (IS_ERR(file)) { 3795 sock_release(ctx->ring_sock); 3796 ctx->ring_sock = NULL; 3797 } else { 3798 ctx->ring_sock->file = file; 3799 } 3800 #endif 3801 return file; 3802 } 3803 3804 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, 3805 struct io_uring_params __user *params) 3806 { 3807 struct io_ring_ctx *ctx; 3808 struct io_uring_task *tctx; 3809 struct file *file; 3810 int ret; 3811 3812 if (!entries) 3813 return -EINVAL; 3814 if (entries > IORING_MAX_ENTRIES) { 3815 if (!(p->flags & IORING_SETUP_CLAMP)) 3816 return -EINVAL; 3817 entries = IORING_MAX_ENTRIES; 3818 } 3819 3820 if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY) 3821 && !(p->flags & IORING_SETUP_NO_MMAP)) 3822 return -EINVAL; 3823 3824 /* 3825 * Use twice as many entries for the CQ ring. It's possible for the 3826 * application to drive a higher depth than the size of the SQ ring, 3827 * since the sqes are only used at submission time. This allows for 3828 * some flexibility in overcommitting a bit. If the application has 3829 * set IORING_SETUP_CQSIZE, it will have passed in the desired number 3830 * of CQ ring entries manually. 3831 */ 3832 p->sq_entries = roundup_pow_of_two(entries); 3833 if (p->flags & IORING_SETUP_CQSIZE) { 3834 /* 3835 * If IORING_SETUP_CQSIZE is set, we do the same roundup 3836 * to a power-of-two, if it isn't already. We do NOT impose 3837 * any cq vs sq ring sizing. 3838 */ 3839 if (!p->cq_entries) 3840 return -EINVAL; 3841 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { 3842 if (!(p->flags & IORING_SETUP_CLAMP)) 3843 return -EINVAL; 3844 p->cq_entries = IORING_MAX_CQ_ENTRIES; 3845 } 3846 p->cq_entries = roundup_pow_of_two(p->cq_entries); 3847 if (p->cq_entries < p->sq_entries) 3848 return -EINVAL; 3849 } else { 3850 p->cq_entries = 2 * p->sq_entries; 3851 } 3852 3853 ctx = io_ring_ctx_alloc(p); 3854 if (!ctx) 3855 return -ENOMEM; 3856 3857 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && 3858 !(ctx->flags & IORING_SETUP_IOPOLL) && 3859 !(ctx->flags & IORING_SETUP_SQPOLL)) 3860 ctx->task_complete = true; 3861 3862 /* 3863 * lazy poll_wq activation relies on ->task_complete for synchronisation 3864 * purposes, see io_activate_pollwq() 3865 */ 3866 if (!ctx->task_complete) 3867 ctx->poll_activated = true; 3868 3869 /* 3870 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user 3871 * space applications don't need to do io completion events 3872 * polling again, they can rely on io_sq_thread to do polling 3873 * work, which can reduce cpu usage and uring_lock contention. 3874 */ 3875 if (ctx->flags & IORING_SETUP_IOPOLL && 3876 !(ctx->flags & IORING_SETUP_SQPOLL)) 3877 ctx->syscall_iopoll = 1; 3878 3879 ctx->compat = in_compat_syscall(); 3880 if (!capable(CAP_IPC_LOCK)) 3881 ctx->user = get_uid(current_user()); 3882 3883 /* 3884 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if 3885 * COOP_TASKRUN is set, then IPIs are never needed by the app. 
	/*
	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
	 */
	ret = -EINVAL;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* IPI related flags don't make sense with SQPOLL */
		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
				  IORING_SETUP_TASKRUN_FLAG |
				  IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL;
	}

	/*
	 * For DEFER_TASKRUN we require the completion task to be the same as the
	 * submission task. This implies that there is only one submitter, so enforce
	 * that.
	 */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
		goto err;
	}

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;

	ret = io_rsrc_init(ctx);
	if (ret)
		goto err;

	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
	p->sq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->sq_off.user_addr = 0;

	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
	p->cq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->cq_off.user_addr = 0;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		goto err_fput;
	tctx = current->io_uring;
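	/*
	 * Illustrative sketch (not kernel code): the ->features mask copied
	 * back to userspace just above is how applications discover ring
	 * capabilities, e.g.
	 *
	 *	struct io_uring_params p = { 0 };
	 *	int fd = syscall(__NR_io_uring_setup, 32, &p);
	 *	bool cqe_skip_ok = p.features & IORING_FEAT_CQE_SKIP;
	 *	bool ext_arg_ok = p.features & IORING_FEAT_EXT_ARG;
	 *
	 * Feature bits should be tested instead of assuming behaviour from
	 * the kernel version.
	 */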
	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
	else
		ret = io_uring_install_fd(file);
	if (ret < 0)
		goto err_fput;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
err_fput:
	fput(file);
	return ret;
}

/*
 * Sets up an io_uring context, and returns the fd. The application asks for a
 * ring size; we return the actual sq/cq ring sizes (among other things) in the
 * params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
			   unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_issue_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			      XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
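/*
 * Illustrative sketch (not kernel code) of the restriction workflow handled
 * by io_register_restrictions() below: a supervisor creates the ring with
 * IORING_SETUP_R_DISABLED, whitelists the operations a less trusted user of
 * the ring may issue, and only then enables it. ring_fd is the fd of such a
 * disabled ring; error handling is omitted.
 *
 *	struct io_uring_restriction res[2] = { };
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[1].sqe_op = IORING_OP_WRITEV;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * Restrictions can only be registered once, and only while the ring is still
 * disabled.
 */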
static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
					   void __user *arg, unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
		/*
		 * Lazy activation attempts would fail if it was polled before
		 * submitter_task is set.
		 */
		if (wq_has_sleeper(&ctx->poll_wq))
			io_activate_pollwq(ctx);
	}

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}

static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}
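/*
 * Illustrative sketch (not kernel code): IORING_REGISTER_IOWQ_AFF takes a
 * CPU bitmap (the same layout sched_setaffinity() uses, with nr_args being
 * the byte length of the mask) and pins this task's io-wq workers to it;
 * IORING_UNREGISTER_IOWQ_AFF clears the restriction again.
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	CPU_SET(3, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */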
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
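/*
 * Illustrative sketch (not kernel code): IORING_REGISTER_IOWQ_MAX_WORKERS
 * takes an array of two __u32 values, the caps for bounded and unbounded
 * io-wq workers. A zero entry leaves that limit untouched, and the values
 * previously in effect are copied back on return, so { 0, 0 } simply queries
 * the current limits.
 *
 *	__u32 counts[2] = { 8, 0 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 *
 * This caps bounded workers at 8, leaves the unbounded limit unchanged, and
 * counts[] holds the previous limits when the call returns.
 */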
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
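	/*
	 * Illustrative sketch (not kernel code): the two cases above let an
	 * application pre-register its ring fd and skip the fdget/fdput on
	 * every syscall by passing the registered index instead. Setting
	 * offset to -1U asks the kernel to pick any free slot and report it
	 * back in the same struct.
	 *
	 *	struct io_uring_rsrc_update reg = {
	 *		.offset = -1U,
	 *		.data = ring_fd,
	 *	};
	 *
	 *	syscall(__NR_io_uring_register, ring_fd,
	 *		IORING_REGISTER_RING_FDS, &reg, 1);
	 *	syscall(__NR_io_uring_enter, reg.offset, to_submit, 0,
	 *		IORING_ENTER_REGISTERED_RING, NULL, 0);
	 *
	 * io_uring_register() itself also accepts a registered index when
	 * IORING_REGISTER_USE_REGISTERED_RING is or'ed into the opcode, see
	 * the syscall entry point below.
	 */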
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;
	bool use_registered_ring;

	use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
	opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	if (use_registered_ring) {
		/*
		 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
		 * need only dereference our task private array to find it.
		 */
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return -EINVAL;
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		f.file = tctx->registered_rings[fd];
		f.flags = 0;
		if (unlikely(!f.file))
			return -EBADF;
	} else {
		f = fdget(fd);
		if (unlikely(!f.file))
			return -EBADF;
		ret = -EOPNOTSUPP;
		if (!io_is_uring_fops(f.file))
			goto out_fput;
	}

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	fdput(f);
	return ret;
}
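/*
 * Illustrative sketch (not kernel code) of the most common registration,
 * IORING_REGISTER_BUFFERS: pinning a set of userspace buffers up front so
 * that IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED can reference them by
 * index instead of mapping pages on every request. ring_fd is an fd
 * returned by io_uring_setup().
 *
 *	static char buf[64 * 1024];
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = sizeof(buf),
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * A later SQE then sets opcode = IORING_OP_READ_FIXED and buf_index = 0.
 */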
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);
	BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32, file_index);
	BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64, addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64, __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	io_uring_optable_init();

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
	return 0;
};
__initcall(io_uring_init);
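/*
 * Illustrative sketch (not kernel code): the SQE layout asserted above is
 * UAPI, while opcode availability varies by kernel. io_probe() (reachable
 * via IORING_REGISTER_PROBE, see above) is how applications discover which
 * opcodes this particular kernel supports before relying on them. The probe
 * buffer must be zeroed before the call.
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *
 *	bool readv_ok = probe->ops[IORING_OP_READV].flags &
 *			IO_URING_OP_SUPPORTED;
 */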