/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <linux/uaccess.h>

#include "internal.h"

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event	io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8
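/*
 * Illustrative sketch (not used by the kernel code below): because the ring
 * is mmap()ed into the owning process and the aio_context_t returned by
 * io_setup() is the mapping's address, a user-space consumer could in
 * principle pop completions without a syscall, provided it first checks
 * that it understands the layout:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *
 *	if (ring->magic == AIO_RING_MAGIC && ring->incompat_features == 0) {
 *		unsigned head = ring->head;
 *
 *		while (head != ring->tail) {
 *			consume_event(&ring->io_events[head]);
 *			head = (head + 1) % ring->nr;
 *		}
 *		ring->head = head;	(a memory barrier is needed before this store)
 *	}
 *
 * Real consumers must handle memory ordering and fall back to
 * io_getevents() for blocking; this is only meant to illustrate why
 * ring->head is described above as written to by userland.
 */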
struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion	comp;
	atomic_t		count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_head		free_rcu;
	struct work_struct	free_work;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

struct aio_kiocb {
	struct kiocb		common;

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data for completion */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct file *file;
	struct path path;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	return file;
}

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
					   AIO_RING_MAGIC);

	if (!IS_ERR(root))
		root->d_sb->s_iflags |= SB_I_NOEXEC;
	return root;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->private_lock);
		i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			   struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	get_page(new);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		put_page(new);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty	= __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};

static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (down_write_killable(&mm->mmap_sem)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused, NULL);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
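/*
 * Worked example (illustrative only): with 4 KiB pages, a 32-byte
 * struct io_event and sizeof(struct aio_ring) == 32, this gives
 * AIO_EVENTS_PER_PAGE == 128, AIO_EVENTS_FIRST_PAGE == 127 and
 * AIO_EVENTS_OFFSET == 1, i.e. page 0 loses one event slot to the header.
 * Event slot "i" of the ring is then located as
 *
 *	pos  = i + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	ev   = (struct io_event *)kmap(page) + pos % AIO_EVENTS_PER_PAGE;
 *
 * which is how aio_complete() and aio_read_events_ring() below index the
 * ring.
 */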
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

static int kiocb_cancel(struct aio_kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = READ_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(&kiocb->common);
}

/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
 * ->free_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_rcufn(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other CPUs' percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.
 *	Used when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
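/*
 * Illustrative example of the batching above (numbers are hypothetical):
 * with ctx->nr_events == 128 and four possible CPUs, ioctx_alloc() sets
 * ctx->req_batch = 127 / (4 * 4) = 7.  A CPU whose local cache runs dry
 * pulls 7 slots at once from ctx->reqs_available in get_reqs_available(),
 * and put_reqs_available() only returns slots to the global counter once
 * the local cache holds at least 2 * req_batch == 14, again 7 at a time.
 * This keeps most submit/complete pairs off the shared atomic counter.
 */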
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out-of-space condition in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since whether we read the old version
		 * or the new version, either will be valid.  The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Returns NULL if no requests
 *	are free.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}

static void kiocb_free(struct aio_kiocb *req)
{
	if (req->common.ki_filp)
		fput(req->common.ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct kiocb *kiocb, long res, long res2)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct file *file = kiocb->ki_filp;

		/*
		 * Tell lockdep we inherited freeze protection from submission
		 * thread.
		 */
		if (S_ISREG(file_inode(file)->i_mode))
			__sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
		file_end_write(file);
	}

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	BUG_ON(is_sync_kiocb(kiocb));

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			ktime_t until)
{
	long ret = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctx32p);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#endif

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass wait to kill_ioctx() where it can be set in a
		 * thread-safe way.  If we try to set it here then we have
		 * a race condition if two io_destroy() calls are made
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done.  Otherwise the
		 * kernel keeps using user-space buffers even if the user
		 * thinks the context is destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}

static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
		bool vectored, bool compat, struct iov_iter *iter)
{
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

	if (!vectored) {
		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}
#ifdef CONFIG_COMPAT
	if (compat)
		return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
				iter);
#endif
	return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
}

static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		return ret;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		/*FALLTHRU*/
	default:
		aio_complete(req, ret, 0);
		return 0;
	}
}

static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
		bool compat)
{
	struct file *file = req->ki_filp;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	ssize_t ret;

	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;
	if (unlikely(!file->f_op->read_iter))
		return -EINVAL;

	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
	if (ret)
		return ret;
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret)
		ret = aio_ret(req, call_read_iter(file, req, &iter));
	kfree(iovec);
	return ret;
}

static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
		bool compat)
{
	struct file *file = req->ki_filp;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	ssize_t ret;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (unlikely(!file->f_op->write_iter))
		return -EINVAL;

	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
	if (ret)
		return ret;
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		req->ki_flags |= IOCB_WRITE;
		file_start_write(file);
		ret = aio_ret(req, call_write_iter(file, req, &iter));
		/*
		 * We release freeze protection in aio_complete().  Fool lockdep
		 * by telling it the lock got released so that it doesn't
		 * complain about held lock when we return to userspace.
		 */
		if (S_ISREG(file_inode(file)->i_mode))
			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
	}
	kfree(iovec);
	return ret;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct aio_kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->common.ki_filp = file = fget(iocb->aio_fildes);
	if (unlikely(!req->common.ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}
	req->common.ki_pos = iocb->aio_offset;
	req->common.ki_complete = aio_complete;
	req->common.ki_flags = iocb_flags(req->common.ki_filp);
	req->common.ki_hint = file_write_hint(file);

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}

		req->common.ki_flags |= IOCB_EVENTFD;
	}

	ret = kiocb_set_rw_flags(&req->common, iocb->aio_rw_flags);
	if (unlikely(ret)) {
		pr_debug("EINVAL: aio_rw_flags\n");
		goto out_put_req;
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_user_iocb = user_iocb;
	req->ki_user_data = iocb->aio_data;

	get_file(file);
	switch (iocb->aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		ret = aio_read(&req->common, iocb, false, compat);
		break;
	case IOCB_CMD_PWRITE:
		ret = aio_write(&req->common, iocb, false, compat);
		break;
	case IOCB_CMD_PREADV:
		ret = aio_read(&req->common, iocb, true, compat);
		break;
	case IOCB_CMD_PWRITEV:
		ret = aio_write(&req->common, iocb, true, compat);
		break;
	default:
		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
		ret = -EINVAL;
		break;
	}
	fput(file);

	if (ret && ret != -EIOCBQUEUED)
		goto out_put_req;
	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	kiocb_free(req);
	return ret;
}

static long do_io_submit(aio_context_t ctx_id, long nr,
			  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

#ifdef CONFIG_COMPAT
static inline long
copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
{
	compat_uptr_t uptr;
	int i;

	for (i = 0; i < nr; ++i) {
		if (get_user(uptr, ptr32 + i))
			return -EFAULT;
		if (put_user(compat_ptr(uptr), ptr64 + i))
			return -EFAULT;
	}
	return 0;
}

#define MAX_AIO_SUBMITS	(PAGE_SIZE/sizeof(struct iocb *))

COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, u32 __user *, iocb)
{
	struct iocb __user * __user *iocb64;
	long ret;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (nr > MAX_AIO_SUBMITS)
		nr = MAX_AIO_SUBMITS;

	iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
	ret = copy_iocb(nr, iocb, iocb64);
	if (!ret)
		ret = do_io_submit(ctx_id, nr, iocb64, 1);
	return ret;
}
#endif

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct aio_kiocb *
lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
{
	struct aio_kiocb *kiocb;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_user_iocb == iocb)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.
 *	May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}

static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct timespec64	ts;

	if (timeout) {
		if (unlikely(get_timespec64(&ts, timeout)))
			return -EFAULT;
	}

	return do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct compat_timespec __user *, timeout)
{
	struct timespec64 t;

	if (timeout) {
		if (compat_get_timespec64(&t, timeout))
			return -EFAULT;

	}

	return do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
}
#endif
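/*
 * Illustrative user-space sketch (not part of this file): the syscalls
 * documented above are typically used roughly as follows, either through
 * libaio or via syscall(2) directly (raw syscall names shown, error
 * handling omitted).  Field names are from <linux/aio_abi.h>.
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup(128, &ctx);			// -> sys_io_setup()
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes	  = fd;
 *	cb.aio_buf	  = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes	  = 4096;
 *	cb.aio_offset	  = 0;
 *	cb.aio_data	  = 0x42;		// echoed back in ev.data
 *
 *	io_submit(ctx, 1, cbs);			// -> sys_io_submit()
 *	io_getevents(ctx, 1, 1, &ev, NULL);	// -> sys_io_getevents()
 *	// ev.res is the byte count on success or a negative errno
 *	io_destroy(ctx);			// -> sys_io_destroy()
 */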