/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY		0

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[];
}; /* 128 bytes + ring size */

/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD	2

#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[] __counted_by(nr);
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct fsync_iocb {
	struct file		*file;
	struct work_struct	work;
	bool			datasync;
	struct cred		*creds;
};

struct poll_iocb {
	struct file		*file;
	struct wait_queue_head	*head;
	__poll_t		events;
	bool			cancelled;
	bool			work_scheduled;
	bool			work_need_resched;
	struct wait_queue_entry	wait;
	struct work_struct	work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct aio_kiocb {
	union {
		struct file		*ki_filp;
		struct kiocb		rw;
		struct fsync_iocb	fsync;
		struct poll_iocb	poll;
	};

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct io_event		ki_res;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	refcount_t		ki_refcnt;

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;		/* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
static struct ctl_table aio_sysctls[] = {
	{
		.procname	= "aio-nr",
		.data		= &aio_nr,
		.maxlen		= sizeof(aio_nr),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "aio-max-nr",
		.data		= &aio_max_nr,
		.maxlen		= sizeof(aio_max_nr),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{}
};

static void __init aio_sysctl_init(void)
{
	register_sysctl_init("fs", aio_sysctls);
}
#else
#define aio_sysctl_init() do { } while (0)
#endif

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				 O_RDWR, &aio_ring_fops);
	if (IS_ERR(file))
		iput(inode);
	return file;
}

static int aio_init_fs_context(struct fs_context *fc)
{
	if (!init_pseudo(fc, AIO_RING_MAGIC))
		return -ENOMEM;
	fc->s_iflags |= SB_I_NOEXEC;
	return 0;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.init_fs_context = aio_init_fs_context,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	aio_sysctl_init();
	return 0;
}
__initcall(aio_setup);
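
/*
 * For reference: aio_sysctl_init() above registers its table under "fs", so
 * the two knobs appear as /proc/sys/fs/aio-nr (read-only: the current number
 * of requests counted against the quota) and /proc/sys/fs/aio-max-nr
 * (root-writable: the system wide limit enforced in ioctx_alloc()).  An
 * illustrative, not prescriptive, way to raise the limit:
 *
 *	# sysctl -w fs.aio-max-nr=1048576
 *
 * The default above is 0x10000 requests.
 */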

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->private_lock);
		i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	if (!table)
		goto out_unlock;

	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

out_unlock:
	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_DONTEXPAND);
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
			struct folio *src, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = src->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old folio hasn't already been changed */
		if (ctx->ring_pages[idx] != &src->page)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(folio_test_writeback(src));
	folio_get(dst);

	rc = folio_migrate_mapping(mapping, dst, src, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		folio_put(dst);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old folio is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	folio_migrate_copy(dst, src);
	BUG_ON(ctx->ring_pages[idx] != &src->page);
	ctx->ring_pages[idx] = &dst->page;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old folio is no longer accessible. */
	folio_put(src);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#else
#define aio_migrate_folio NULL
#endif

static const struct address_space_operations aio_ctx_aops = {
	.dirty_folio	= noop_dirty_folio,
	.migrate_folio	= aio_migrate_folio,
};

static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_USER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (mmap_write_lock_killable(mm)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
				 PROT_READ | PROT_WRITE,
				 MAP_SHARED, 0, 0, &unused, NULL);
	mmap_write_unlock(mm);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = page_address(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
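
/*
 * How a completion slot index maps onto the ring pages (this is the
 * calculation done by aio_complete() and aio_read_events_ring() below): the
 * aio_ring header at the start of the first page is treated as if it occupied
 * AIO_EVENTS_OFFSET dummy io_event slots, so that
 *
 *	pos  = slot_index + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	ev   = (struct io_event *)page_address(page) + pos % AIO_EVENTS_PER_PAGE;
 *
 * works uniformly for every page.  As a rough worked example (assuming 4 KiB
 * pages and the usual 32-byte struct io_event; the exact values are
 * architecture dependent): AIO_EVENTS_PER_PAGE is 128, the first page holds
 * one header's worth fewer events, and AIO_EVENTS_OFFSET makes up the
 * difference.
 */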

void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req;
	struct kioctx *ctx;
	unsigned long flags;

	/*
	 * kiocb didn't come from aio or is neither a read nor a write, hence
	 * ignore it.
	 */
	if (!(iocb->ki_flags & IOCB_AIO_RW))
		return;

	req = container_of(iocb, struct aio_kiocb, rw);

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
		return;

	ctx = req->ki_ctx;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
					  free_rwork);
	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = page_address(ctx->ring_pages[0]);
					ring->id = ctx->id;
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}
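
/*
 * Note on the table sizing above: the table starts out with room for 4
 * contexts and is grown by a factor of four (4, 16, 64, ...) whenever every
 * slot is in use, per new_nr = (table ? table->nr : 1) * 4.  The chosen slot
 * index is also written into the user-visible ring header (ring->id), which
 * is what lookup_ioctx() reads back so it can find the kioctx without
 * scanning the whole table.
 */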

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 *  could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool __get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;
		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
					     &avail, avail - ctx->req_batch));

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
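
/*
 * A short worked example of the batching done by the two helpers above: with
 * ctx->req_batch == B, a CPU whose local kioctx_cpu cache runs dry pulls B
 * slots at once out of the global ctx->reqs_available, and put_reqs_available()
 * only trickles B slots back to the global counter once the local cache has
 * reached 2*B.  Each CPU may therefore be holding up to 2*B - 1 slots that the
 * global counter cannot see, which is why ioctx_alloc() doubled nr_events
 * relative to what userspace asked for.
 */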

/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
                                  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out-of-space condition in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay: whether we read the old version or
		 * the new version, either will be valid.  The important part
		 * is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = page_address(ctx->ring_pages[0]);
		head = ring->head;

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	if (__get_reqs_available(ctx))
		return true;
	user_refill_reqs_available(ctx);
	return __get_reqs_available(ctx);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 *
 *	The refcount is initialized to 2 - one for the async op completion,
 *	one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	if (unlikely(!get_reqs_available(ctx))) {
		kmem_cache_free(kiocb_cachep, req);
		return NULL;
	}

	percpu_ref_get(&ctx->reqs);
	req->ki_ctx = ctx;
	INIT_LIST_HEAD(&req->ki_list);
	refcount_set(&req->ki_refcnt, 2);
	req->ki_eventfd = NULL;
	return req;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))
			ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

static inline void iocb_destroy(struct aio_kiocb *iocb)
{
	if (iocb->ki_eventfd)
		eventfd_ctx_put(iocb->ki_eventfd);
	if (iocb->ki_filp)
		fput(iocb->ki_filp);
	percpu_ref_put(&iocb->ki_ctx->reqs);
	kmem_cache_free(kiocb_cachep, iocb);
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	*event = iocb->ki_res;

	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
		 (void __user *)(unsigned long)iocb->ki_res.obj,
		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd)
		eventfd_signal(iocb->ki_eventfd, 1);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
}

static inline void iocb_put(struct aio_kiocb *iocb)
{
	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
		aio_complete(iocb);
		iocb_destroy(iocb);
	}
}

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		ev = page_address(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = page_address(ctx->ring_pages[0]);
	ring->head = head;
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			ktime_t until)
{
	long ret = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);
	return ret;
}
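
/*
 * For orientation before the syscall definitions that follow, a rough sketch
 * of how userspace typically drives this interface (illustrative only; real
 * applications usually go through libaio rather than raw syscalls):
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { ... }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup(128, &ctx);                  <- ioctx_alloc() + ring mmap
 *	io_submit(ctx, 1, cbs);               <- io_submit_one() per iocb
 *	io_getevents(ctx, 1, 1, &ev, NULL);   <- read_events() from the ring
 *	io_destroy(ctx);                      <- kill_ioctx(), wait for requests
 */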

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	or if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctx32p);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#endif

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass the wait descriptor to kill_ioctx() where ctx->rq_wait
		 * can be set in a thread-safe way. If we tried to set it here
		 * we would have a race condition if two io_destroy() calls ran
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise the
		 * kernel keeps using user-space buffers even though the user
		 * thinks the context is destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}

static void aio_remove_iocb(struct aio_kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_del(&iocb->ki_list);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}

static void aio_complete_rw(struct kiocb *kiocb, long res)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		if (S_ISREG(inode->i_mode))
			kiocb_end_write(kiocb);
	}

	iocb->ki_res.res = res;
	iocb->ki_res.res2 = 0;
	iocb_put(iocb);
}

static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
{
	int ret;

	req->ki_complete = aio_complete_rw;
	req->private = NULL;
	req->ki_pos = iocb->aio_offset;
	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
	if (iocb->aio_flags & IOCB_FLAG_RESFD)
		req->ki_flags |= IOCB_EVENTFD;
	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		/*
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		 */
		ret = ioprio_check_cap(iocb->aio_reqprio);
		if (ret) {
			pr_debug("aio ioprio check cap error: %d\n", ret);
			return ret;
		}

		req->ki_ioprio = iocb->aio_reqprio;
	} else
		req->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
	if (unlikely(ret))
		return ret;

	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
	return 0;
}

static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
		struct iovec **iovec, bool vectored, bool compat,
		struct iov_iter *iter)
{
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

	if (!vectored) {
		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
}

static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIOs
		 * may already be running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		req->ki_complete(req, ret);
	}
}

static int aio_read(struct kiocb *req, const struct iocb *iocb,
			bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb);
	if (ret)
		return ret;
	file = req->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;
	if (unlikely(!file->f_op->read_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret)
		aio_rw_done(req, call_read_iter(file, req, &iter));
	kfree(iovec);
	return ret;
}

static int aio_write(struct kiocb *req, const struct iocb *iocb,
			 bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb);
	if (ret)
		return ret;
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (unlikely(!file->f_op->write_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		if (S_ISREG(file_inode(file)->i_mode))
			kiocb_start_write(req);
		req->ki_flags |= IOCB_WRITE;
		aio_rw_done(req, call_write_iter(file, req, &iter));
	}
	kfree(iovec);
	return ret;
}

static void aio_fsync_work(struct work_struct *work)
{
	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
	const struct cred *old_cred = override_creds(iocb->fsync.creds);

	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
	revert_creds(old_cred);
	put_cred(iocb->fsync.creds);
	iocb_put(iocb);
}

static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
		     bool datasync)
{
	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
			iocb->aio_rw_flags))
		return -EINVAL;

	if (unlikely(!req->file->f_op->fsync))
		return -EINVAL;

	req->creds = prepare_creds();
	if (!req->creds)
		return -ENOMEM;

	req->datasync = datasync;
	INIT_WORK(&req->work, aio_fsync_work);
	schedule_work(&req->work);
	return 0;
}

static void aio_poll_put_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);

	iocb_put(iocb);
}

/*
 * Safely lock the waitqueue which the request is on, synchronizing with the
 * case where the ->poll() provider decides to free its waitqueue early.
 *
 * Returns true on success, meaning that req->head->lock was locked, req->wait
 * is on req->head, and an RCU read lock was taken.  Returns false if the
 * request was already removed from its waitqueue (which might no longer exist).
 */
static bool poll_iocb_lock_wq(struct poll_iocb *req)
{
	wait_queue_head_t *head;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us, then check whether the request is still on the queue.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	head = smp_load_acquire(&req->head);
	if (head) {
		spin_lock(&head->lock);
		if (!list_empty(&req->wait.entry))
			return true;
		spin_unlock(&head->lock);
	}
	rcu_read_unlock();
	return false;
}

static void poll_iocb_unlock_wq(struct poll_iocb *req)
{
	spin_unlock(&req->head->lock);
	rcu_read_unlock();
}

static void aio_poll_complete_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct poll_table_struct pt = { ._key = req->events };
	struct kioctx *ctx = iocb->ki_ctx;
	__poll_t mask = 0;

	if (!READ_ONCE(req->cancelled))
		mask = vfs_poll(req->file, &pt) & req->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
	 * synchronize with them.  In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	if (poll_iocb_lock_wq(req)) {
		if (!mask && !READ_ONCE(req->cancelled)) {
			/*
			 * The request isn't actually ready to be completed yet.
			 * Reschedule completion if another wakeup came in.
			 */
			if (req->work_need_resched) {
				schedule_work(&req->work);
				req->work_need_resched = false;
			} else {
				req->work_scheduled = false;
			}
			poll_iocb_unlock_wq(req);
			spin_unlock_irq(&ctx->ctx_lock);
			return;
		}
		list_del_init(&req->wait.entry);
		poll_iocb_unlock_wq(req);
	} /* else, POLLFREE has freed the waitqueue, so we must complete */
	list_del_init(&iocb->ki_list);
	iocb->ki_res.res = mangle_poll(mask);
	spin_unlock_irq(&ctx->ctx_lock);

	iocb_put(iocb);
}

/* assumes we are called with irqs disabled */
static int aio_poll_cancel(struct kiocb *iocb)
{
	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
	struct poll_iocb *req = &aiocb->poll;

	if (poll_iocb_lock_wq(req)) {
		WRITE_ONCE(req->cancelled, true);
		if (!req->work_scheduled) {
			schedule_work(&aiocb->poll.work);
			req->work_scheduled = true;
		}
		poll_iocb_unlock_wq(req);
	} /* else, the request was force-cancelled by POLLFREE already */

	return 0;
}

static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		void *key)
{
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))
		return 0;

	/*
	 * Complete the request inline if possible.  This requires that three
	 * conditions be met:
	 *   1. An event mask must have been passed.  If a plain wakeup was done
	 *	instead, then mask == 0 and we have to call vfs_poll() to get
	 *	the events, so inline completion isn't possible.
	 *   2. The completion work must not have already been scheduled.
	 *   3. ctx_lock must not be busy.  We have to use trylock because we
	 *	already hold the waitqueue lock, so this inverts the normal
	 *	locking order.  Use irqsave/irqrestore because not all
	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
	 *	yet IRQs have to be disabled before ctx_lock is obtained.
	 */
	if (mask && !req->work_scheduled &&
	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
		struct kioctx *ctx = iocb->ki_ctx;

		list_del_init(&req->wait.entry);
		list_del(&iocb->ki_list);
		iocb->ki_res.res = mangle_poll(mask);
		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
			iocb = NULL;
			INIT_WORK(&req->work, aio_poll_put_work);
			schedule_work(&req->work);
		}
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
		if (iocb)
			iocb_put(iocb);
	} else {
		/*
		 * Schedule the completion work if needed.  If it was already
		 * scheduled, record that another wakeup came in.
		 *
		 * Don't remove the request from the waitqueue here, as it might
		 * not actually be complete yet (we won't know until vfs_poll()
		 * is called), and we must not miss any wakeups.  POLLFREE is an
		 * exception to this; see below.
		 */
		if (req->work_scheduled) {
			req->work_need_resched = true;
		} else {
			schedule_work(&req->work);
			req->work_scheduled = true;
		}

		/*
		 * If the waitqueue is being freed early but we can't complete
		 * the request inline, we have to tear down the request as best
		 * we can.  That means immediately removing the request from its
		 * waitqueue and preventing all further accesses to the
		 * waitqueue via the request.  We also need to schedule the
		 * completion work (done above).  Also mark the request as
		 * cancelled, to potentially skip an unneeded call to ->poll().
		 */
		if (mask & POLLFREE) {
			WRITE_ONCE(req->cancelled, true);
			list_del_init(&req->wait.entry);

			/*
			 * Careful: this *must* be the last step, since as soon
			 * as req->head is NULL'ed out, the request can be
			 * completed and freed, since aio_poll_complete_work()
			 * will no longer need to take the waitqueue lock.
			 */
			smp_store_release(&req->head, NULL);
		}
	}
	return 1;
}
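
/*
 * To summarize the machinery above: a poll request can reach completion in
 * three ways.  (a) aio_poll_wake() completes it inline when it receives a
 * real event mask and can trylock ctx_lock; (b) aio_poll_complete_work()
 * completes it from process context after re-evaluating vfs_poll(), which is
 * also the path taken after aio_poll_cancel() or a POLLFREE wakeup; or (c)
 * the submission path in aio_poll() below "steals" the result when the file
 * was already ready at submission time.  On each of these completion paths
 * the completion-side reference is dropped with iocb_put(); the
 * submission-side reference is dropped separately in io_submit_one().
 */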
That means immediately removing the request from its 1807 * waitqueue and preventing all further accesses to the 1808 * waitqueue via the request. We also need to schedule the 1809 * completion work (done above). Also mark the request as 1810 * cancelled, to potentially skip an unneeded call to ->poll(). 1811 */ 1812 if (mask & POLLFREE) { 1813 WRITE_ONCE(req->cancelled, true); 1814 list_del_init(&req->wait.entry); 1815 1816 /* 1817 * Careful: this *must* be the last step, since as soon 1818 * as req->head is NULL'ed out, the request can be 1819 * completed and freed, since aio_poll_complete_work() 1820 * will no longer need to take the waitqueue lock. 1821 */ 1822 smp_store_release(&req->head, NULL); 1823 } 1824 } 1825 return 1; 1826 } 1827 1828 struct aio_poll_table { 1829 struct poll_table_struct pt; 1830 struct aio_kiocb *iocb; 1831 bool queued; 1832 int error; 1833 }; 1834 1835 static void 1836 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, 1837 struct poll_table_struct *p) 1838 { 1839 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); 1840 1841 /* multiple wait queues per file are not supported */ 1842 if (unlikely(pt->queued)) { 1843 pt->error = -EINVAL; 1844 return; 1845 } 1846 1847 pt->queued = true; 1848 pt->error = 0; 1849 pt->iocb->poll.head = head; 1850 add_wait_queue(head, &pt->iocb->poll.wait); 1851 } 1852 1853 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1854 { 1855 struct kioctx *ctx = aiocb->ki_ctx; 1856 struct poll_iocb *req = &aiocb->poll; 1857 struct aio_poll_table apt; 1858 bool cancel = false; 1859 __poll_t mask; 1860 1861 /* reject any unknown events outside the normal event mask. */ 1862 if ((u16)iocb->aio_buf != iocb->aio_buf) 1863 return -EINVAL; 1864 /* reject fields that are not defined for poll */ 1865 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) 1866 return -EINVAL; 1867 1868 INIT_WORK(&req->work, aio_poll_complete_work); 1869 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1870 1871 req->head = NULL; 1872 req->cancelled = false; 1873 req->work_scheduled = false; 1874 req->work_need_resched = false; 1875 1876 apt.pt._qproc = aio_poll_queue_proc; 1877 apt.pt._key = req->events; 1878 apt.iocb = aiocb; 1879 apt.queued = false; 1880 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ 1881 1882 /* initialized the list so that we can do list_empty checks */ 1883 INIT_LIST_HEAD(&req->wait.entry); 1884 init_waitqueue_func_entry(&req->wait, aio_poll_wake); 1885 1886 mask = vfs_poll(req->file, &apt.pt) & req->events; 1887 spin_lock_irq(&ctx->ctx_lock); 1888 if (likely(apt.queued)) { 1889 bool on_queue = poll_iocb_lock_wq(req); 1890 1891 if (!on_queue || req->work_scheduled) { 1892 /* 1893 * aio_poll_wake() already either scheduled the async 1894 * completion work, or completed the request inline. 1895 */ 1896 if (apt.error) /* unsupported case: multiple queues */ 1897 cancel = true; 1898 apt.error = 0; 1899 mask = 0; 1900 } 1901 if (mask || apt.error) { 1902 /* Steal to complete synchronously. */ 1903 list_del_init(&req->wait.entry); 1904 } else if (cancel) { 1905 /* Cancel if possible (may be too late though). */ 1906 WRITE_ONCE(req->cancelled, true); 1907 } else if (on_queue) { 1908 /* 1909 * Actually waiting for an event, so add the request to 1910 * active_reqs so that it can be cancelled if needed. 
1911 */ 1912 list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1913 aiocb->ki_cancel = aio_poll_cancel; 1914 } 1915 if (on_queue) 1916 poll_iocb_unlock_wq(req); 1917 } 1918 if (mask) { /* no async, we'd stolen it */ 1919 aiocb->ki_res.res = mangle_poll(mask); 1920 apt.error = 0; 1921 } 1922 spin_unlock_irq(&ctx->ctx_lock); 1923 if (mask) 1924 iocb_put(aiocb); 1925 return apt.error; 1926 } 1927 1928 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1929 struct iocb __user *user_iocb, struct aio_kiocb *req, 1930 bool compat) 1931 { 1932 req->ki_filp = fget(iocb->aio_fildes); 1933 if (unlikely(!req->ki_filp)) 1934 return -EBADF; 1935 1936 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1937 struct eventfd_ctx *eventfd; 1938 /* 1939 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1940 * instance of the file* now. The file descriptor must be 1941 * an eventfd() fd, and will be signaled for each completed 1942 * event using the eventfd_signal() function. 1943 */ 1944 eventfd = eventfd_ctx_fdget(iocb->aio_resfd); 1945 if (IS_ERR(eventfd)) 1946 return PTR_ERR(eventfd); 1947 1948 req->ki_eventfd = eventfd; 1949 } 1950 1951 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { 1952 pr_debug("EFAULT: aio_key\n"); 1953 return -EFAULT; 1954 } 1955 1956 req->ki_res.obj = (u64)(unsigned long)user_iocb; 1957 req->ki_res.data = iocb->aio_data; 1958 req->ki_res.res = 0; 1959 req->ki_res.res2 = 0; 1960 1961 switch (iocb->aio_lio_opcode) { 1962 case IOCB_CMD_PREAD: 1963 return aio_read(&req->rw, iocb, false, compat); 1964 case IOCB_CMD_PWRITE: 1965 return aio_write(&req->rw, iocb, false, compat); 1966 case IOCB_CMD_PREADV: 1967 return aio_read(&req->rw, iocb, true, compat); 1968 case IOCB_CMD_PWRITEV: 1969 return aio_write(&req->rw, iocb, true, compat); 1970 case IOCB_CMD_FSYNC: 1971 return aio_fsync(&req->fsync, iocb, false); 1972 case IOCB_CMD_FDSYNC: 1973 return aio_fsync(&req->fsync, iocb, true); 1974 case IOCB_CMD_POLL: 1975 return aio_poll(req, iocb); 1976 default: 1977 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 1978 return -EINVAL; 1979 } 1980 } 1981 1982 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1983 bool compat) 1984 { 1985 struct aio_kiocb *req; 1986 struct iocb iocb; 1987 int err; 1988 1989 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) 1990 return -EFAULT; 1991 1992 /* enforce forwards compatibility on users */ 1993 if (unlikely(iocb.aio_reserved2)) { 1994 pr_debug("EINVAL: reserve field set\n"); 1995 return -EINVAL; 1996 } 1997 1998 /* prevent overflows */ 1999 if (unlikely( 2000 (iocb.aio_buf != (unsigned long)iocb.aio_buf) || 2001 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || 2002 ((ssize_t)iocb.aio_nbytes < 0) 2003 )) { 2004 pr_debug("EINVAL: overflow check\n"); 2005 return -EINVAL; 2006 } 2007 2008 req = aio_get_req(ctx); 2009 if (unlikely(!req)) 2010 return -EAGAIN; 2011 2012 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); 2013 2014 /* Done with the synchronous reference */ 2015 iocb_put(req); 2016 2017 /* 2018 * If err is 0, we'd either done aio_complete() ourselves or have 2019 * arranged for that to be done asynchronously. Anything non-zero 2020 * means that we need to destroy req ourselves. 2021 */ 2022 if (unlikely(err)) { 2023 iocb_destroy(req); 2024 put_reqs_available(ctx, 1); 2025 } 2026 return err; 2027 } 2028 2029 /* sys_io_submit: 2030 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 2031 * the number of iocbs queued. 
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, false);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
#endif
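/*
 * Illustrative userspace sketch (not part of the kernel build): a minimal
 * submit-and-reap cycle against the raw syscalls, assuming "fd" is an open
 * file descriptor.  Real applications usually go through libaio's
 * io_setup()/io_submit()/io_getevents() wrappers instead.
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *	char buf[4096];
 *
 *	syscall(__NR_io_setup, 128, &ctx);
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	syscall(__NR_io_submit, ctx, 1, cbs);
 *	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
 *	syscall(__NR_io_destroy, ctx);
 */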
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
			break;
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS indicates
		 * that cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
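/*
 * Illustrative userspace sketch (not part of the kernel build): cancelling
 * a previously submitted iocb.  "cb" is assumed to be the same struct iocb,
 * at the same user address, that was handed to io_submit(2), since it is
 * matched against ki_res.obj above.  With the current implementation a
 * successfully initiated cancel yields -EINPROGRESS, and the (cancelled)
 * completion still arrives via io_getevents(2).
 *
 *	struct io_event ev;
 *	long ret = syscall(__NR_io_cancel, ctx, &cb, &ev);
 *
 * A return of -1 with errno == EINPROGRESS means cancellation was started;
 * the completion event is then reaped through io_getevents(2) as usual.
 */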
static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
#ifdef CONFIG_64BIT

SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif
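/*
 * Illustrative userspace sketch (not part of the kernel build): io_pgetevents
 * atomically swaps in a signal mask for the duration of the wait, in the
 * spirit of pselect(2)/ppoll(2).  The local struct below mirrors the
 * __aio_sigset layout above; the sigsetsize it carries must match the
 * kernel's sigset size (_NSIG / 8 bytes, i.e. 8 on x86-64), not glibc's
 * sizeof(sigset_t), or set_user_sigmask() rejects it.
 *
 *	sigset_t block_all;
 *	struct io_event evs[8];
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct {
 *		const sigset_t *sigmask;
 *		size_t sigsetsize;
 *	} usig = { &block_all, _NSIG / 8 };
 *
 *	sigfillset(&block_all);
 *	syscall(__NR_io_pgetevents, ctx, 1, 8, evs, &ts, &usig);
 */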
#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;
};

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif