1 /* 2 * An async IO implementation for Linux 3 * Written by Benjamin LaHaise <bcrl@kvack.org> 4 * 5 * Implements an efficient asynchronous io interface. 6 * 7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. 8 * Copyright 2018 Christoph Hellwig. 9 * 10 * See ../COPYING for licensing terms. 11 */ 12 #define pr_fmt(fmt) "%s: " fmt, __func__ 13 14 #include <linux/kernel.h> 15 #include <linux/init.h> 16 #include <linux/errno.h> 17 #include <linux/time.h> 18 #include <linux/aio_abi.h> 19 #include <linux/export.h> 20 #include <linux/syscalls.h> 21 #include <linux/backing-dev.h> 22 #include <linux/refcount.h> 23 #include <linux/uio.h> 24 25 #include <linux/sched/signal.h> 26 #include <linux/fs.h> 27 #include <linux/file.h> 28 #include <linux/mm.h> 29 #include <linux/mman.h> 30 #include <linux/mmu_context.h> 31 #include <linux/percpu.h> 32 #include <linux/slab.h> 33 #include <linux/timer.h> 34 #include <linux/aio.h> 35 #include <linux/highmem.h> 36 #include <linux/workqueue.h> 37 #include <linux/security.h> 38 #include <linux/eventfd.h> 39 #include <linux/blkdev.h> 40 #include <linux/compat.h> 41 #include <linux/migrate.h> 42 #include <linux/ramfs.h> 43 #include <linux/percpu-refcount.h> 44 #include <linux/mount.h> 45 46 #include <asm/kmap_types.h> 47 #include <linux/uaccess.h> 48 #include <linux/nospec.h> 49 50 #include "internal.h" 51 52 #define KIOCB_KEY 0 53 54 #define AIO_RING_MAGIC 0xa10a10a1 55 #define AIO_RING_COMPAT_FEATURES 1 56 #define AIO_RING_INCOMPAT_FEATURES 0 57 struct aio_ring { 58 unsigned id; /* kernel internal index number */ 59 unsigned nr; /* number of io_events */ 60 unsigned head; /* Written to by userland or under ring_lock 61 * mutex by aio_read_events_ring(). */ 62 unsigned tail; 63 64 unsigned magic; 65 unsigned compat_features; 66 unsigned incompat_features; 67 unsigned header_length; /* size of aio_ring */ 68 69 70 struct io_event io_events[0]; 71 }; /* 128 bytes + ring size */ 72 73 /* 74 * Plugging is meant to work with larger batches of IOs. If we don't 75 * have more than the below, then don't bother setting up a plug. 76 */ 77 #define AIO_PLUG_THRESHOLD 2 78 79 #define AIO_RING_PAGES 8 80 81 struct kioctx_table { 82 struct rcu_head rcu; 83 unsigned nr; 84 struct kioctx __rcu *table[]; 85 }; 86 87 struct kioctx_cpu { 88 unsigned reqs_available; 89 }; 90 91 struct ctx_rq_wait { 92 struct completion comp; 93 atomic_t count; 94 }; 95 96 struct kioctx { 97 struct percpu_ref users; 98 atomic_t dead; 99 100 struct percpu_ref reqs; 101 102 unsigned long user_id; 103 104 struct __percpu kioctx_cpu *cpu; 105 106 /* 107 * For percpu reqs_available, number of slots we move to/from global 108 * counter at a time: 109 */ 110 unsigned req_batch; 111 /* 112 * This is what userspace passed to io_setup(), it's not used for 113 * anything but counting against the global max_reqs quota. 
114 * 115 * The real limit is nr_events - 1, which will be larger (see 116 * aio_setup_ring()) 117 */ 118 unsigned max_reqs; 119 120 /* Size of ringbuffer, in units of struct io_event */ 121 unsigned nr_events; 122 123 unsigned long mmap_base; 124 unsigned long mmap_size; 125 126 struct page **ring_pages; 127 long nr_pages; 128 129 struct rcu_work free_rwork; /* see free_ioctx() */ 130 131 /* 132 * signals when all in-flight requests are done 133 */ 134 struct ctx_rq_wait *rq_wait; 135 136 struct { 137 /* 138 * This counts the number of available slots in the ringbuffer, 139 * so we avoid overflowing it: it's decremented (if positive) 140 * when allocating a kiocb and incremented when the resulting 141 * io_event is pulled off the ringbuffer. 142 * 143 * We batch accesses to it with a percpu version. 144 */ 145 atomic_t reqs_available; 146 } ____cacheline_aligned_in_smp; 147 148 struct { 149 spinlock_t ctx_lock; 150 struct list_head active_reqs; /* used for cancellation */ 151 } ____cacheline_aligned_in_smp; 152 153 struct { 154 struct mutex ring_lock; 155 wait_queue_head_t wait; 156 } ____cacheline_aligned_in_smp; 157 158 struct { 159 unsigned tail; 160 unsigned completed_events; 161 spinlock_t completion_lock; 162 } ____cacheline_aligned_in_smp; 163 164 struct page *internal_pages[AIO_RING_PAGES]; 165 struct file *aio_ring_file; 166 167 unsigned id; 168 }; 169 170 /* 171 * First field must be the file pointer in all the 172 * iocb unions! See also 'struct kiocb' in <linux/fs.h> 173 */ 174 struct fsync_iocb { 175 struct file *file; 176 struct work_struct work; 177 bool datasync; 178 }; 179 180 struct poll_iocb { 181 struct file *file; 182 struct wait_queue_head *head; 183 __poll_t events; 184 bool woken; 185 bool cancelled; 186 struct wait_queue_entry wait; 187 struct work_struct work; 188 }; 189 190 /* 191 * NOTE! Each of the iocb union members has the file pointer 192 * as the first entry in their struct definition. So you can 193 * access the file pointer through any of the sub-structs, 194 * or directly as just 'ki_filp' in this struct. 195 */ 196 struct aio_kiocb { 197 union { 198 struct file *ki_filp; 199 struct kiocb rw; 200 struct fsync_iocb fsync; 201 struct poll_iocb poll; 202 }; 203 204 struct kioctx *ki_ctx; 205 kiocb_cancel_fn *ki_cancel; 206 207 struct iocb __user *ki_user_iocb; /* user's aiocb */ 208 __u64 ki_user_data; /* user's data for completion */ 209 210 struct list_head ki_list; /* the aio core uses this 211 * for cancellation */ 212 refcount_t ki_refcnt; 213 214 /* 215 * If the aio_resfd field of the userspace iocb is not zero, 216 * this is the underlying eventfd context to deliver events to. 
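	 * The reference is taken via eventfd_ctx_fdget() at submission time
	 * when IOCB_FLAG_RESFD is set, and dropped again in aio_complete()
	 * after the eventfd has been signalled.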
217 */ 218 struct eventfd_ctx *ki_eventfd; 219 }; 220 221 /*------ sysctl variables----*/ 222 static DEFINE_SPINLOCK(aio_nr_lock); 223 unsigned long aio_nr; /* current system wide number of aio requests */ 224 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ 225 /*----end sysctl variables---*/ 226 227 static struct kmem_cache *kiocb_cachep; 228 static struct kmem_cache *kioctx_cachep; 229 230 static struct vfsmount *aio_mnt; 231 232 static const struct file_operations aio_ring_fops; 233 static const struct address_space_operations aio_ctx_aops; 234 235 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) 236 { 237 struct file *file; 238 struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); 239 if (IS_ERR(inode)) 240 return ERR_CAST(inode); 241 242 inode->i_mapping->a_ops = &aio_ctx_aops; 243 inode->i_mapping->private_data = ctx; 244 inode->i_size = PAGE_SIZE * nr_pages; 245 246 file = alloc_file_pseudo(inode, aio_mnt, "[aio]", 247 O_RDWR, &aio_ring_fops); 248 if (IS_ERR(file)) 249 iput(inode); 250 return file; 251 } 252 253 static struct dentry *aio_mount(struct file_system_type *fs_type, 254 int flags, const char *dev_name, void *data) 255 { 256 struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL, 257 AIO_RING_MAGIC); 258 259 if (!IS_ERR(root)) 260 root->d_sb->s_iflags |= SB_I_NOEXEC; 261 return root; 262 } 263 264 /* aio_setup 265 * Creates the slab caches used by the aio routines, panic on 266 * failure as this is done early during the boot sequence. 267 */ 268 static int __init aio_setup(void) 269 { 270 static struct file_system_type aio_fs = { 271 .name = "aio", 272 .mount = aio_mount, 273 .kill_sb = kill_anon_super, 274 }; 275 aio_mnt = kern_mount(&aio_fs); 276 if (IS_ERR(aio_mnt)) 277 panic("Failed to create aio fs mount."); 278 279 kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 280 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 281 return 0; 282 } 283 __initcall(aio_setup); 284 285 static void put_aio_ring_file(struct kioctx *ctx) 286 { 287 struct file *aio_ring_file = ctx->aio_ring_file; 288 struct address_space *i_mapping; 289 290 if (aio_ring_file) { 291 truncate_setsize(file_inode(aio_ring_file), 0); 292 293 /* Prevent further access to the kioctx from migratepages */ 294 i_mapping = aio_ring_file->f_mapping; 295 spin_lock(&i_mapping->private_lock); 296 i_mapping->private_data = NULL; 297 ctx->aio_ring_file = NULL; 298 spin_unlock(&i_mapping->private_lock); 299 300 fput(aio_ring_file); 301 } 302 } 303 304 static void aio_free_ring(struct kioctx *ctx) 305 { 306 int i; 307 308 /* Disconnect the kiotx from the ring file. This prevents future 309 * accesses to the kioctx from page migration. 
310 */ 311 put_aio_ring_file(ctx); 312 313 for (i = 0; i < ctx->nr_pages; i++) { 314 struct page *page; 315 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 316 page_count(ctx->ring_pages[i])); 317 page = ctx->ring_pages[i]; 318 if (!page) 319 continue; 320 ctx->ring_pages[i] = NULL; 321 put_page(page); 322 } 323 324 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { 325 kfree(ctx->ring_pages); 326 ctx->ring_pages = NULL; 327 } 328 } 329 330 static int aio_ring_mremap(struct vm_area_struct *vma) 331 { 332 struct file *file = vma->vm_file; 333 struct mm_struct *mm = vma->vm_mm; 334 struct kioctx_table *table; 335 int i, res = -EINVAL; 336 337 spin_lock(&mm->ioctx_lock); 338 rcu_read_lock(); 339 table = rcu_dereference(mm->ioctx_table); 340 for (i = 0; i < table->nr; i++) { 341 struct kioctx *ctx; 342 343 ctx = rcu_dereference(table->table[i]); 344 if (ctx && ctx->aio_ring_file == file) { 345 if (!atomic_read(&ctx->dead)) { 346 ctx->user_id = ctx->mmap_base = vma->vm_start; 347 res = 0; 348 } 349 break; 350 } 351 } 352 353 rcu_read_unlock(); 354 spin_unlock(&mm->ioctx_lock); 355 return res; 356 } 357 358 static const struct vm_operations_struct aio_ring_vm_ops = { 359 .mremap = aio_ring_mremap, 360 #if IS_ENABLED(CONFIG_MMU) 361 .fault = filemap_fault, 362 .map_pages = filemap_map_pages, 363 .page_mkwrite = filemap_page_mkwrite, 364 #endif 365 }; 366 367 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) 368 { 369 vma->vm_flags |= VM_DONTEXPAND; 370 vma->vm_ops = &aio_ring_vm_ops; 371 return 0; 372 } 373 374 static const struct file_operations aio_ring_fops = { 375 .mmap = aio_ring_mmap, 376 }; 377 378 #if IS_ENABLED(CONFIG_MIGRATION) 379 static int aio_migratepage(struct address_space *mapping, struct page *new, 380 struct page *old, enum migrate_mode mode) 381 { 382 struct kioctx *ctx; 383 unsigned long flags; 384 pgoff_t idx; 385 int rc; 386 387 /* 388 * We cannot support the _NO_COPY case here, because copy needs to 389 * happen under the ctx->completion_lock. That does not work with the 390 * migration workflow of MIGRATE_SYNC_NO_COPY. 391 */ 392 if (mode == MIGRATE_SYNC_NO_COPY) 393 return -EINVAL; 394 395 rc = 0; 396 397 /* mapping->private_lock here protects against the kioctx teardown. */ 398 spin_lock(&mapping->private_lock); 399 ctx = mapping->private_data; 400 if (!ctx) { 401 rc = -EINVAL; 402 goto out; 403 } 404 405 /* The ring_lock mutex. The prevents aio_read_events() from writing 406 * to the ring's head, and prevents page migration from mucking in 407 * a partially initialized kiotx. 408 */ 409 if (!mutex_trylock(&ctx->ring_lock)) { 410 rc = -EAGAIN; 411 goto out; 412 } 413 414 idx = old->index; 415 if (idx < (pgoff_t)ctx->nr_pages) { 416 /* Make sure the old page hasn't already been changed */ 417 if (ctx->ring_pages[idx] != old) 418 rc = -EAGAIN; 419 } else 420 rc = -EINVAL; 421 422 if (rc != 0) 423 goto out_unlock; 424 425 /* Writeback must be complete */ 426 BUG_ON(PageWriteback(old)); 427 get_page(new); 428 429 rc = migrate_page_move_mapping(mapping, new, old, mode, 1); 430 if (rc != MIGRATEPAGE_SUCCESS) { 431 put_page(new); 432 goto out_unlock; 433 } 434 435 /* Take completion_lock to prevent other writes to the ring buffer 436 * while the old page is copied to the new. This prevents new 437 * events from being lost. 
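	 * aio_complete() and user_refill_reqs_available() serialize against
	 * us on the same lock.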
438 */ 439 spin_lock_irqsave(&ctx->completion_lock, flags); 440 migrate_page_copy(new, old); 441 BUG_ON(ctx->ring_pages[idx] != old); 442 ctx->ring_pages[idx] = new; 443 spin_unlock_irqrestore(&ctx->completion_lock, flags); 444 445 /* The old page is no longer accessible. */ 446 put_page(old); 447 448 out_unlock: 449 mutex_unlock(&ctx->ring_lock); 450 out: 451 spin_unlock(&mapping->private_lock); 452 return rc; 453 } 454 #endif 455 456 static const struct address_space_operations aio_ctx_aops = { 457 .set_page_dirty = __set_page_dirty_no_writeback, 458 #if IS_ENABLED(CONFIG_MIGRATION) 459 .migratepage = aio_migratepage, 460 #endif 461 }; 462 463 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) 464 { 465 struct aio_ring *ring; 466 struct mm_struct *mm = current->mm; 467 unsigned long size, unused; 468 int nr_pages; 469 int i; 470 struct file *file; 471 472 /* Compensate for the ring buffer's head/tail overlap entry */ 473 nr_events += 2; /* 1 is required, 2 for good luck */ 474 475 size = sizeof(struct aio_ring); 476 size += sizeof(struct io_event) * nr_events; 477 478 nr_pages = PFN_UP(size); 479 if (nr_pages < 0) 480 return -EINVAL; 481 482 file = aio_private_file(ctx, nr_pages); 483 if (IS_ERR(file)) { 484 ctx->aio_ring_file = NULL; 485 return -ENOMEM; 486 } 487 488 ctx->aio_ring_file = file; 489 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) 490 / sizeof(struct io_event); 491 492 ctx->ring_pages = ctx->internal_pages; 493 if (nr_pages > AIO_RING_PAGES) { 494 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), 495 GFP_KERNEL); 496 if (!ctx->ring_pages) { 497 put_aio_ring_file(ctx); 498 return -ENOMEM; 499 } 500 } 501 502 for (i = 0; i < nr_pages; i++) { 503 struct page *page; 504 page = find_or_create_page(file->f_mapping, 505 i, GFP_HIGHUSER | __GFP_ZERO); 506 if (!page) 507 break; 508 pr_debug("pid(%d) page[%d]->count=%d\n", 509 current->pid, i, page_count(page)); 510 SetPageUptodate(page); 511 unlock_page(page); 512 513 ctx->ring_pages[i] = page; 514 } 515 ctx->nr_pages = i; 516 517 if (unlikely(i != nr_pages)) { 518 aio_free_ring(ctx); 519 return -ENOMEM; 520 } 521 522 ctx->mmap_size = nr_pages * PAGE_SIZE; 523 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); 524 525 if (down_write_killable(&mm->mmap_sem)) { 526 ctx->mmap_size = 0; 527 aio_free_ring(ctx); 528 return -EINTR; 529 } 530 531 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, 532 PROT_READ | PROT_WRITE, 533 MAP_SHARED, 0, &unused, NULL); 534 up_write(&mm->mmap_sem); 535 if (IS_ERR((void *)ctx->mmap_base)) { 536 ctx->mmap_size = 0; 537 aio_free_ring(ctx); 538 return -ENOMEM; 539 } 540 541 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); 542 543 ctx->user_id = ctx->mmap_base; 544 ctx->nr_events = nr_events; /* trusted copy */ 545 546 ring = kmap_atomic(ctx->ring_pages[0]); 547 ring->nr = nr_events; /* user copy */ 548 ring->id = ~0U; 549 ring->head = ring->tail = 0; 550 ring->magic = AIO_RING_MAGIC; 551 ring->compat_features = AIO_RING_COMPAT_FEATURES; 552 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; 553 ring->header_length = sizeof(struct aio_ring); 554 kunmap_atomic(ring); 555 flush_dcache_page(ctx->ring_pages[0]); 556 557 return 0; 558 } 559 560 #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) 561 #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) 562 #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) 563 564 void kiocb_set_cancel_fn(struct kiocb *iocb, 
			 kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
		return;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
					  free_rwork);
	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests left */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ?
table->nr : 1) * 4; 664 spin_unlock(&mm->ioctx_lock); 665 666 table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * 667 new_nr, GFP_KERNEL); 668 if (!table) 669 return -ENOMEM; 670 671 table->nr = new_nr; 672 673 spin_lock(&mm->ioctx_lock); 674 old = rcu_dereference_raw(mm->ioctx_table); 675 676 if (!old) { 677 rcu_assign_pointer(mm->ioctx_table, table); 678 } else if (table->nr > old->nr) { 679 memcpy(table->table, old->table, 680 old->nr * sizeof(struct kioctx *)); 681 682 rcu_assign_pointer(mm->ioctx_table, table); 683 kfree_rcu(old, rcu); 684 } else { 685 kfree(table); 686 table = old; 687 } 688 } 689 } 690 691 static void aio_nr_sub(unsigned nr) 692 { 693 spin_lock(&aio_nr_lock); 694 if (WARN_ON(aio_nr - nr > aio_nr)) 695 aio_nr = 0; 696 else 697 aio_nr -= nr; 698 spin_unlock(&aio_nr_lock); 699 } 700 701 /* ioctx_alloc 702 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. 703 */ 704 static struct kioctx *ioctx_alloc(unsigned nr_events) 705 { 706 struct mm_struct *mm = current->mm; 707 struct kioctx *ctx; 708 int err = -ENOMEM; 709 710 /* 711 * Store the original nr_events -- what userspace passed to io_setup(), 712 * for counting against the global limit -- before it changes. 713 */ 714 unsigned int max_reqs = nr_events; 715 716 /* 717 * We keep track of the number of available ringbuffer slots, to prevent 718 * overflow (reqs_available), and we also use percpu counters for this. 719 * 720 * So since up to half the slots might be on other cpu's percpu counters 721 * and unavailable, double nr_events so userspace sees what they 722 * expected: additionally, we move req_batch slots to/from percpu 723 * counters at a time, so make sure that isn't 0: 724 */ 725 nr_events = max(nr_events, num_possible_cpus() * 4); 726 nr_events *= 2; 727 728 /* Prevent overflows */ 729 if (nr_events > (0x10000000U / sizeof(struct io_event))) { 730 pr_debug("ENOMEM: nr_events too high\n"); 731 return ERR_PTR(-EINVAL); 732 } 733 734 if (!nr_events || (unsigned long)max_reqs > aio_max_nr) 735 return ERR_PTR(-EAGAIN); 736 737 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); 738 if (!ctx) 739 return ERR_PTR(-ENOMEM); 740 741 ctx->max_reqs = max_reqs; 742 743 spin_lock_init(&ctx->ctx_lock); 744 spin_lock_init(&ctx->completion_lock); 745 mutex_init(&ctx->ring_lock); 746 /* Protect against page migration throughout kiotx setup by keeping 747 * the ring_lock mutex held until setup is complete. 
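	 * The mutex is taken right below and only released once
	 * ioctx_add_table() has published the context (or in the error path).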
	 */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
862 */ 863 void exit_aio(struct mm_struct *mm) 864 { 865 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); 866 struct ctx_rq_wait wait; 867 int i, skipped; 868 869 if (!table) 870 return; 871 872 atomic_set(&wait.count, table->nr); 873 init_completion(&wait.comp); 874 875 skipped = 0; 876 for (i = 0; i < table->nr; ++i) { 877 struct kioctx *ctx = 878 rcu_dereference_protected(table->table[i], true); 879 880 if (!ctx) { 881 skipped++; 882 continue; 883 } 884 885 /* 886 * We don't need to bother with munmap() here - exit_mmap(mm) 887 * is coming and it'll unmap everything. And we simply can't, 888 * this is not necessarily our ->mm. 889 * Since kill_ioctx() uses non-zero ->mmap_size as indicator 890 * that it needs to unmap the area, just set it to 0. 891 */ 892 ctx->mmap_size = 0; 893 kill_ioctx(mm, ctx, &wait); 894 } 895 896 if (!atomic_sub_and_test(skipped, &wait.count)) { 897 /* Wait until all IO for the context are done. */ 898 wait_for_completion(&wait.comp); 899 } 900 901 RCU_INIT_POINTER(mm->ioctx_table, NULL); 902 kfree(table); 903 } 904 905 static void put_reqs_available(struct kioctx *ctx, unsigned nr) 906 { 907 struct kioctx_cpu *kcpu; 908 unsigned long flags; 909 910 local_irq_save(flags); 911 kcpu = this_cpu_ptr(ctx->cpu); 912 kcpu->reqs_available += nr; 913 914 while (kcpu->reqs_available >= ctx->req_batch * 2) { 915 kcpu->reqs_available -= ctx->req_batch; 916 atomic_add(ctx->req_batch, &ctx->reqs_available); 917 } 918 919 local_irq_restore(flags); 920 } 921 922 static bool __get_reqs_available(struct kioctx *ctx) 923 { 924 struct kioctx_cpu *kcpu; 925 bool ret = false; 926 unsigned long flags; 927 928 local_irq_save(flags); 929 kcpu = this_cpu_ptr(ctx->cpu); 930 if (!kcpu->reqs_available) { 931 int old, avail = atomic_read(&ctx->reqs_available); 932 933 do { 934 if (avail < ctx->req_batch) 935 goto out; 936 937 old = avail; 938 avail = atomic_cmpxchg(&ctx->reqs_available, 939 avail, avail - ctx->req_batch); 940 } while (avail != old); 941 942 kcpu->reqs_available += ctx->req_batch; 943 } 944 945 ret = true; 946 kcpu->reqs_available--; 947 out: 948 local_irq_restore(flags); 949 return ret; 950 } 951 952 /* refill_reqs_available 953 * Updates the reqs_available reference counts used for tracking the 954 * number of free slots in the completion ring. This can be called 955 * from aio_complete() (to optimistically update reqs_available) or 956 * from aio_get_req() (the we're out of events case). It must be 957 * called holding ctx->completion_lock. 958 */ 959 static void refill_reqs_available(struct kioctx *ctx, unsigned head, 960 unsigned tail) 961 { 962 unsigned events_in_ring, completed; 963 964 /* Clamp head since userland can write to it. */ 965 head %= ctx->nr_events; 966 if (head <= tail) 967 events_in_ring = tail - head; 968 else 969 events_in_ring = ctx->nr_events - (head - tail); 970 971 completed = ctx->completed_events; 972 if (events_in_ring < completed) 973 completed -= events_in_ring; 974 else 975 completed = 0; 976 977 if (!completed) 978 return; 979 980 ctx->completed_events -= completed; 981 put_reqs_available(ctx, completed); 982 } 983 984 /* user_refill_reqs_available 985 * Called to refill reqs_available when aio_get_req() encounters an 986 * out of space in the completion ring. 
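 *	Only slots whose completion events have already been consumed from
 *	the ring can be reclaimed here, so the submission path may still
 *	end up failing with -EAGAIN.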
987 */ 988 static void user_refill_reqs_available(struct kioctx *ctx) 989 { 990 spin_lock_irq(&ctx->completion_lock); 991 if (ctx->completed_events) { 992 struct aio_ring *ring; 993 unsigned head; 994 995 /* Access of ring->head may race with aio_read_events_ring() 996 * here, but that's okay since whether we read the old version 997 * or the new version, and either will be valid. The important 998 * part is that head cannot pass tail since we prevent 999 * aio_complete() from updating tail by holding 1000 * ctx->completion_lock. Even if head is invalid, the check 1001 * against ctx->completed_events below will make sure we do the 1002 * safe/right thing. 1003 */ 1004 ring = kmap_atomic(ctx->ring_pages[0]); 1005 head = ring->head; 1006 kunmap_atomic(ring); 1007 1008 refill_reqs_available(ctx, head, ctx->tail); 1009 } 1010 1011 spin_unlock_irq(&ctx->completion_lock); 1012 } 1013 1014 static bool get_reqs_available(struct kioctx *ctx) 1015 { 1016 if (__get_reqs_available(ctx)) 1017 return true; 1018 user_refill_reqs_available(ctx); 1019 return __get_reqs_available(ctx); 1020 } 1021 1022 /* aio_get_req 1023 * Allocate a slot for an aio request. 1024 * Returns NULL if no requests are free. 1025 */ 1026 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) 1027 { 1028 struct aio_kiocb *req; 1029 1030 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); 1031 if (unlikely(!req)) 1032 return NULL; 1033 1034 percpu_ref_get(&ctx->reqs); 1035 req->ki_ctx = ctx; 1036 INIT_LIST_HEAD(&req->ki_list); 1037 refcount_set(&req->ki_refcnt, 0); 1038 req->ki_eventfd = NULL; 1039 return req; 1040 } 1041 1042 static struct kioctx *lookup_ioctx(unsigned long ctx_id) 1043 { 1044 struct aio_ring __user *ring = (void __user *)ctx_id; 1045 struct mm_struct *mm = current->mm; 1046 struct kioctx *ctx, *ret = NULL; 1047 struct kioctx_table *table; 1048 unsigned id; 1049 1050 if (get_user(id, &ring->id)) 1051 return NULL; 1052 1053 rcu_read_lock(); 1054 table = rcu_dereference(mm->ioctx_table); 1055 1056 if (!table || id >= table->nr) 1057 goto out; 1058 1059 id = array_index_nospec(id, table->nr); 1060 ctx = rcu_dereference(table->table[id]); 1061 if (ctx && ctx->user_id == ctx_id) { 1062 if (percpu_ref_tryget_live(&ctx->users)) 1063 ret = ctx; 1064 } 1065 out: 1066 rcu_read_unlock(); 1067 return ret; 1068 } 1069 1070 static inline void iocb_put(struct aio_kiocb *iocb) 1071 { 1072 if (refcount_read(&iocb->ki_refcnt) == 0 || 1073 refcount_dec_and_test(&iocb->ki_refcnt)) { 1074 if (iocb->ki_filp) 1075 fput(iocb->ki_filp); 1076 percpu_ref_put(&iocb->ki_ctx->reqs); 1077 kmem_cache_free(kiocb_cachep, iocb); 1078 } 1079 } 1080 1081 static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb, 1082 long res, long res2) 1083 { 1084 ev->obj = (u64)(unsigned long)iocb->ki_user_iocb; 1085 ev->data = iocb->ki_user_data; 1086 ev->res = res; 1087 ev->res2 = res2; 1088 } 1089 1090 /* aio_complete 1091 * Called when the io request on the given iocb is complete. 1092 */ 1093 static void aio_complete(struct aio_kiocb *iocb, long res, long res2) 1094 { 1095 struct kioctx *ctx = iocb->ki_ctx; 1096 struct aio_ring *ring; 1097 struct io_event *ev_page, *event; 1098 unsigned tail, pos, head; 1099 unsigned long flags; 1100 1101 /* 1102 * Add a completion event to the ring buffer. Must be done holding 1103 * ctx->completion_lock to prevent other code from messing with the tail 1104 * pointer since we might be called from irq context. 
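	 * The tail is only ever advanced here; aio_read_events_ring() consumes
	 * events from the head under ctx->ring_lock instead.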
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	aio_fill_event(event, iocb, res, res2);

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd) {
		eventfd_signal(iocb->ki_eventfd, 1);
		eventfd_ctx_put(iocb->ki_eventfd);
	}

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	iocb_put(iocb);
}

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, we also
	 * see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?
tail : ctx->nr_events) - head; 1216 if (head == tail) 1217 break; 1218 1219 pos = head + AIO_EVENTS_OFFSET; 1220 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; 1221 pos %= AIO_EVENTS_PER_PAGE; 1222 1223 avail = min(avail, nr - ret); 1224 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); 1225 1226 ev = kmap(page); 1227 copy_ret = copy_to_user(event + ret, ev + pos, 1228 sizeof(*ev) * avail); 1229 kunmap(page); 1230 1231 if (unlikely(copy_ret)) { 1232 ret = -EFAULT; 1233 goto out; 1234 } 1235 1236 ret += avail; 1237 head += avail; 1238 head %= ctx->nr_events; 1239 } 1240 1241 ring = kmap_atomic(ctx->ring_pages[0]); 1242 ring->head = head; 1243 kunmap_atomic(ring); 1244 flush_dcache_page(ctx->ring_pages[0]); 1245 1246 pr_debug("%li h%u t%u\n", ret, head, tail); 1247 out: 1248 mutex_unlock(&ctx->ring_lock); 1249 1250 return ret; 1251 } 1252 1253 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, 1254 struct io_event __user *event, long *i) 1255 { 1256 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); 1257 1258 if (ret > 0) 1259 *i += ret; 1260 1261 if (unlikely(atomic_read(&ctx->dead))) 1262 ret = -EINVAL; 1263 1264 if (!*i) 1265 *i = ret; 1266 1267 return ret < 0 || *i >= min_nr; 1268 } 1269 1270 static long read_events(struct kioctx *ctx, long min_nr, long nr, 1271 struct io_event __user *event, 1272 ktime_t until) 1273 { 1274 long ret = 0; 1275 1276 /* 1277 * Note that aio_read_events() is being called as the conditional - i.e. 1278 * we're calling it after prepare_to_wait() has set task state to 1279 * TASK_INTERRUPTIBLE. 1280 * 1281 * But aio_read_events() can block, and if it blocks it's going to flip 1282 * the task state back to TASK_RUNNING. 1283 * 1284 * This should be ok, provided it doesn't flip the state back to 1285 * TASK_RUNNING and return 0 too much - that causes us to spin. That 1286 * will only happen if the mutex_lock() call blocks, and we then find 1287 * the ringbuffer empty. So in practice we should be ok, but it's 1288 * something to be aware of when touching this code. 1289 */ 1290 if (until == 0) 1291 aio_read_events(ctx, min_nr, nr, event, &ret); 1292 else 1293 wait_event_interruptible_hrtimeout(ctx->wait, 1294 aio_read_events(ctx, min_nr, nr, event, &ret), 1295 until); 1296 return ret; 1297 } 1298 1299 /* sys_io_setup: 1300 * Create an aio_context capable of receiving at least nr_events. 1301 * ctxp must not point to an aio_context that already exists, and 1302 * must be initialized to 0 prior to the call. On successful 1303 * creation of the aio_context, *ctxp is filled in with the resulting 1304 * handle. May fail with -EINVAL if *ctxp is not initialized, 1305 * if the specified nr_events exceeds internal limits. May fail 1306 * with -EAGAIN if the specified nr_events exceeds the user's limit 1307 * of available events. May fail with -ENOMEM if insufficient kernel 1308 * resources are available. May fail with -EFAULT if an invalid 1309 * pointer is passed for ctxp. Will fail with -ENOSYS if not 1310 * implemented. 
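 *
 *	A minimal illustrative call from userspace (a sketch of the expected
 *	usage only):
 *
 *		aio_context_t ctx = 0;
 *		if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *			perror("io_setup");
 *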
1311 */ 1312 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) 1313 { 1314 struct kioctx *ioctx = NULL; 1315 unsigned long ctx; 1316 long ret; 1317 1318 ret = get_user(ctx, ctxp); 1319 if (unlikely(ret)) 1320 goto out; 1321 1322 ret = -EINVAL; 1323 if (unlikely(ctx || nr_events == 0)) { 1324 pr_debug("EINVAL: ctx %lu nr_events %u\n", 1325 ctx, nr_events); 1326 goto out; 1327 } 1328 1329 ioctx = ioctx_alloc(nr_events); 1330 ret = PTR_ERR(ioctx); 1331 if (!IS_ERR(ioctx)) { 1332 ret = put_user(ioctx->user_id, ctxp); 1333 if (ret) 1334 kill_ioctx(current->mm, ioctx, NULL); 1335 percpu_ref_put(&ioctx->users); 1336 } 1337 1338 out: 1339 return ret; 1340 } 1341 1342 #ifdef CONFIG_COMPAT 1343 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p) 1344 { 1345 struct kioctx *ioctx = NULL; 1346 unsigned long ctx; 1347 long ret; 1348 1349 ret = get_user(ctx, ctx32p); 1350 if (unlikely(ret)) 1351 goto out; 1352 1353 ret = -EINVAL; 1354 if (unlikely(ctx || nr_events == 0)) { 1355 pr_debug("EINVAL: ctx %lu nr_events %u\n", 1356 ctx, nr_events); 1357 goto out; 1358 } 1359 1360 ioctx = ioctx_alloc(nr_events); 1361 ret = PTR_ERR(ioctx); 1362 if (!IS_ERR(ioctx)) { 1363 /* truncating is ok because it's a user address */ 1364 ret = put_user((u32)ioctx->user_id, ctx32p); 1365 if (ret) 1366 kill_ioctx(current->mm, ioctx, NULL); 1367 percpu_ref_put(&ioctx->users); 1368 } 1369 1370 out: 1371 return ret; 1372 } 1373 #endif 1374 1375 /* sys_io_destroy: 1376 * Destroy the aio_context specified. May cancel any outstanding 1377 * AIOs and block on completion. Will fail with -ENOSYS if not 1378 * implemented. May fail with -EINVAL if the context pointed to 1379 * is invalid. 1380 */ 1381 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) 1382 { 1383 struct kioctx *ioctx = lookup_ioctx(ctx); 1384 if (likely(NULL != ioctx)) { 1385 struct ctx_rq_wait wait; 1386 int ret; 1387 1388 init_completion(&wait.comp); 1389 atomic_set(&wait.count, 1); 1390 1391 /* Pass requests_done to kill_ioctx() where it can be set 1392 * in a thread-safe way. If we try to set it here then we have 1393 * a race condition if two io_destroy() called simultaneously. 1394 */ 1395 ret = kill_ioctx(current->mm, ioctx, &wait); 1396 percpu_ref_put(&ioctx->users); 1397 1398 /* Wait until all IO for the context are done. Otherwise kernel 1399 * keep using user-space buffers even if user thinks the context 1400 * is destroyed. 1401 */ 1402 if (!ret) 1403 wait_for_completion(&wait.comp); 1404 1405 return ret; 1406 } 1407 pr_debug("EINVAL: invalid context id\n"); 1408 return -EINVAL; 1409 } 1410 1411 static void aio_remove_iocb(struct aio_kiocb *iocb) 1412 { 1413 struct kioctx *ctx = iocb->ki_ctx; 1414 unsigned long flags; 1415 1416 spin_lock_irqsave(&ctx->ctx_lock, flags); 1417 list_del(&iocb->ki_list); 1418 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 1419 } 1420 1421 static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) 1422 { 1423 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); 1424 1425 if (!list_empty_careful(&iocb->ki_list)) 1426 aio_remove_iocb(iocb); 1427 1428 if (kiocb->ki_flags & IOCB_WRITE) { 1429 struct inode *inode = file_inode(kiocb->ki_filp); 1430 1431 /* 1432 * Tell lockdep we inherited freeze protection from submission 1433 * thread. 
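		 * (The protection itself was taken in aio_write() via
		 * __sb_start_write() and handed over with __sb_writers_release().)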
1434 */ 1435 if (S_ISREG(inode->i_mode)) 1436 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); 1437 file_end_write(kiocb->ki_filp); 1438 } 1439 1440 aio_complete(iocb, res, res2); 1441 } 1442 1443 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) 1444 { 1445 int ret; 1446 1447 req->ki_complete = aio_complete_rw; 1448 req->private = NULL; 1449 req->ki_pos = iocb->aio_offset; 1450 req->ki_flags = iocb_flags(req->ki_filp); 1451 if (iocb->aio_flags & IOCB_FLAG_RESFD) 1452 req->ki_flags |= IOCB_EVENTFD; 1453 req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp)); 1454 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) { 1455 /* 1456 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then 1457 * aio_reqprio is interpreted as an I/O scheduling 1458 * class and priority. 1459 */ 1460 ret = ioprio_check_cap(iocb->aio_reqprio); 1461 if (ret) { 1462 pr_debug("aio ioprio check cap error: %d\n", ret); 1463 return ret; 1464 } 1465 1466 req->ki_ioprio = iocb->aio_reqprio; 1467 } else 1468 req->ki_ioprio = get_current_ioprio(); 1469 1470 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); 1471 if (unlikely(ret)) 1472 return ret; 1473 1474 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ 1475 return 0; 1476 } 1477 1478 static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec, 1479 bool vectored, bool compat, struct iov_iter *iter) 1480 { 1481 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; 1482 size_t len = iocb->aio_nbytes; 1483 1484 if (!vectored) { 1485 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); 1486 *iovec = NULL; 1487 return ret; 1488 } 1489 #ifdef CONFIG_COMPAT 1490 if (compat) 1491 return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec, 1492 iter); 1493 #endif 1494 return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter); 1495 } 1496 1497 static inline void aio_rw_done(struct kiocb *req, ssize_t ret) 1498 { 1499 switch (ret) { 1500 case -EIOCBQUEUED: 1501 break; 1502 case -ERESTARTSYS: 1503 case -ERESTARTNOINTR: 1504 case -ERESTARTNOHAND: 1505 case -ERESTART_RESTARTBLOCK: 1506 /* 1507 * There's no easy way to restart the syscall since other AIO's 1508 * may be already running. Just fail this IO with EINTR. 
1509 */ 1510 ret = -EINTR; 1511 /*FALLTHRU*/ 1512 default: 1513 req->ki_complete(req, ret, 0); 1514 } 1515 } 1516 1517 static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, 1518 bool vectored, bool compat) 1519 { 1520 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1521 struct iov_iter iter; 1522 struct file *file; 1523 ssize_t ret; 1524 1525 ret = aio_prep_rw(req, iocb); 1526 if (ret) 1527 return ret; 1528 file = req->ki_filp; 1529 if (unlikely(!(file->f_mode & FMODE_READ))) 1530 return -EBADF; 1531 ret = -EINVAL; 1532 if (unlikely(!file->f_op->read_iter)) 1533 return -EINVAL; 1534 1535 ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); 1536 if (ret) 1537 return ret; 1538 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); 1539 if (!ret) 1540 aio_rw_done(req, call_read_iter(file, req, &iter)); 1541 kfree(iovec); 1542 return ret; 1543 } 1544 1545 static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, 1546 bool vectored, bool compat) 1547 { 1548 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1549 struct iov_iter iter; 1550 struct file *file; 1551 ssize_t ret; 1552 1553 ret = aio_prep_rw(req, iocb); 1554 if (ret) 1555 return ret; 1556 file = req->ki_filp; 1557 1558 if (unlikely(!(file->f_mode & FMODE_WRITE))) 1559 return -EBADF; 1560 if (unlikely(!file->f_op->write_iter)) 1561 return -EINVAL; 1562 1563 ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); 1564 if (ret) 1565 return ret; 1566 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); 1567 if (!ret) { 1568 /* 1569 * Open-code file_start_write here to grab freeze protection, 1570 * which will be released by another thread in 1571 * aio_complete_rw(). Fool lockdep by telling it the lock got 1572 * released so that it doesn't complain about the held lock when 1573 * we return to userspace. 
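		 * (aio_complete_rw() re-acquires it for lockdep and then calls
		 * file_end_write() when the write completes.)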
1574 */ 1575 if (S_ISREG(file_inode(file)->i_mode)) { 1576 __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); 1577 __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); 1578 } 1579 req->ki_flags |= IOCB_WRITE; 1580 aio_rw_done(req, call_write_iter(file, req, &iter)); 1581 } 1582 kfree(iovec); 1583 return ret; 1584 } 1585 1586 static void aio_fsync_work(struct work_struct *work) 1587 { 1588 struct fsync_iocb *req = container_of(work, struct fsync_iocb, work); 1589 int ret; 1590 1591 ret = vfs_fsync(req->file, req->datasync); 1592 aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); 1593 } 1594 1595 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, 1596 bool datasync) 1597 { 1598 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || 1599 iocb->aio_rw_flags)) 1600 return -EINVAL; 1601 1602 if (unlikely(!req->file->f_op->fsync)) 1603 return -EINVAL; 1604 1605 req->datasync = datasync; 1606 INIT_WORK(&req->work, aio_fsync_work); 1607 schedule_work(&req->work); 1608 return 0; 1609 } 1610 1611 static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) 1612 { 1613 aio_complete(iocb, mangle_poll(mask), 0); 1614 } 1615 1616 static void aio_poll_complete_work(struct work_struct *work) 1617 { 1618 struct poll_iocb *req = container_of(work, struct poll_iocb, work); 1619 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1620 struct poll_table_struct pt = { ._key = req->events }; 1621 struct kioctx *ctx = iocb->ki_ctx; 1622 __poll_t mask = 0; 1623 1624 if (!READ_ONCE(req->cancelled)) 1625 mask = vfs_poll(req->file, &pt) & req->events; 1626 1627 /* 1628 * Note that ->ki_cancel callers also delete iocb from active_reqs after 1629 * calling ->ki_cancel. We need the ctx_lock roundtrip here to 1630 * synchronize with them. In the cancellation case the list_del_init 1631 * itself is not actually needed, but harmless so we keep it in to 1632 * avoid further branches in the fast path. 1633 */ 1634 spin_lock_irq(&ctx->ctx_lock); 1635 if (!mask && !READ_ONCE(req->cancelled)) { 1636 add_wait_queue(req->head, &req->wait); 1637 spin_unlock_irq(&ctx->ctx_lock); 1638 return; 1639 } 1640 list_del_init(&iocb->ki_list); 1641 spin_unlock_irq(&ctx->ctx_lock); 1642 1643 aio_poll_complete(iocb, mask); 1644 } 1645 1646 /* assumes we are called with irqs disabled */ 1647 static int aio_poll_cancel(struct kiocb *iocb) 1648 { 1649 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); 1650 struct poll_iocb *req = &aiocb->poll; 1651 1652 spin_lock(&req->head->lock); 1653 WRITE_ONCE(req->cancelled, true); 1654 if (!list_empty(&req->wait.entry)) { 1655 list_del_init(&req->wait.entry); 1656 schedule_work(&aiocb->poll.work); 1657 } 1658 spin_unlock(&req->head->lock); 1659 1660 return 0; 1661 } 1662 1663 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, 1664 void *key) 1665 { 1666 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); 1667 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1668 __poll_t mask = key_to_poll(key); 1669 unsigned long flags; 1670 1671 req->woken = true; 1672 1673 /* for instances that support it check for an event match first: */ 1674 if (mask) { 1675 if (!(mask & req->events)) 1676 return 0; 1677 1678 /* 1679 * Try to complete the iocb inline if we can. Use 1680 * irqsave/irqrestore because not all filesystems (e.g. 
fuse) 1681 * call this function with IRQs disabled and because IRQs 1682 * have to be disabled before ctx_lock is obtained. 1683 */ 1684 if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { 1685 list_del(&iocb->ki_list); 1686 spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); 1687 1688 list_del_init(&req->wait.entry); 1689 aio_poll_complete(iocb, mask); 1690 return 1; 1691 } 1692 } 1693 1694 list_del_init(&req->wait.entry); 1695 schedule_work(&req->work); 1696 return 1; 1697 } 1698 1699 struct aio_poll_table { 1700 struct poll_table_struct pt; 1701 struct aio_kiocb *iocb; 1702 int error; 1703 }; 1704 1705 static void 1706 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, 1707 struct poll_table_struct *p) 1708 { 1709 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); 1710 1711 /* multiple wait queues per file are not supported */ 1712 if (unlikely(pt->iocb->poll.head)) { 1713 pt->error = -EINVAL; 1714 return; 1715 } 1716 1717 pt->error = 0; 1718 pt->iocb->poll.head = head; 1719 add_wait_queue(head, &pt->iocb->poll.wait); 1720 } 1721 1722 static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1723 { 1724 struct kioctx *ctx = aiocb->ki_ctx; 1725 struct poll_iocb *req = &aiocb->poll; 1726 struct aio_poll_table apt; 1727 __poll_t mask; 1728 1729 /* reject any unknown events outside the normal event mask. */ 1730 if ((u16)iocb->aio_buf != iocb->aio_buf) 1731 return -EINVAL; 1732 /* reject fields that are not defined for poll */ 1733 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) 1734 return -EINVAL; 1735 1736 INIT_WORK(&req->work, aio_poll_complete_work); 1737 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1738 1739 req->head = NULL; 1740 req->woken = false; 1741 req->cancelled = false; 1742 1743 apt.pt._qproc = aio_poll_queue_proc; 1744 apt.pt._key = req->events; 1745 apt.iocb = aiocb; 1746 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ 1747 1748 /* initialized the list so that we can do list_empty checks */ 1749 INIT_LIST_HEAD(&req->wait.entry); 1750 init_waitqueue_func_entry(&req->wait, aio_poll_wake); 1751 1752 /* one for removal from waitqueue, one for this function */ 1753 refcount_set(&aiocb->ki_refcnt, 2); 1754 1755 mask = vfs_poll(req->file, &apt.pt) & req->events; 1756 if (unlikely(!req->head)) { 1757 /* we did not manage to set up a waitqueue, done */ 1758 goto out; 1759 } 1760 1761 spin_lock_irq(&ctx->ctx_lock); 1762 spin_lock(&req->head->lock); 1763 if (req->woken) { 1764 /* wake_up context handles the rest */ 1765 mask = 0; 1766 apt.error = 0; 1767 } else if (mask || apt.error) { 1768 /* if we get an error or a mask we are done */ 1769 WARN_ON_ONCE(list_empty(&req->wait.entry)); 1770 list_del_init(&req->wait.entry); 1771 } else { 1772 /* actually waiting for an event */ 1773 list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1774 aiocb->ki_cancel = aio_poll_cancel; 1775 } 1776 spin_unlock(&req->head->lock); 1777 spin_unlock_irq(&ctx->ctx_lock); 1778 1779 out: 1780 if (unlikely(apt.error)) 1781 return apt.error; 1782 1783 if (mask) 1784 aio_poll_complete(aiocb, mask); 1785 iocb_put(aiocb); 1786 return 0; 1787 } 1788 1789 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1790 struct iocb __user *user_iocb, bool compat) 1791 { 1792 struct aio_kiocb *req; 1793 ssize_t ret; 1794 1795 /* enforce forwards compatibility on users */ 1796 if (unlikely(iocb->aio_reserved2)) { 1797 pr_debug("EINVAL: reserve field set\n"); 1798 return -EINVAL; 1799 } 
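	/*
	 * From here on the submission path is: reserve a completion-ring
	 * slot, allocate the aio_kiocb, grab a reference on the target file,
	 * then dispatch on aio_lio_opcode below.
	 */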
1800 1801 /* prevent overflows */ 1802 if (unlikely( 1803 (iocb->aio_buf != (unsigned long)iocb->aio_buf) || 1804 (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || 1805 ((ssize_t)iocb->aio_nbytes < 0) 1806 )) { 1807 pr_debug("EINVAL: overflow check\n"); 1808 return -EINVAL; 1809 } 1810 1811 if (!get_reqs_available(ctx)) 1812 return -EAGAIN; 1813 1814 ret = -EAGAIN; 1815 req = aio_get_req(ctx); 1816 if (unlikely(!req)) 1817 goto out_put_reqs_available; 1818 1819 req->ki_filp = fget(iocb->aio_fildes); 1820 ret = -EBADF; 1821 if (unlikely(!req->ki_filp)) 1822 goto out_put_req; 1823 1824 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1825 /* 1826 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1827 * instance of the file* now. The file descriptor must be 1828 * an eventfd() fd, and will be signaled for each completed 1829 * event using the eventfd_signal() function. 1830 */ 1831 req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); 1832 if (IS_ERR(req->ki_eventfd)) { 1833 ret = PTR_ERR(req->ki_eventfd); 1834 req->ki_eventfd = NULL; 1835 goto out_put_req; 1836 } 1837 } 1838 1839 ret = put_user(KIOCB_KEY, &user_iocb->aio_key); 1840 if (unlikely(ret)) { 1841 pr_debug("EFAULT: aio_key\n"); 1842 goto out_put_req; 1843 } 1844 1845 req->ki_user_iocb = user_iocb; 1846 req->ki_user_data = iocb->aio_data; 1847 1848 switch (iocb->aio_lio_opcode) { 1849 case IOCB_CMD_PREAD: 1850 ret = aio_read(&req->rw, iocb, false, compat); 1851 break; 1852 case IOCB_CMD_PWRITE: 1853 ret = aio_write(&req->rw, iocb, false, compat); 1854 break; 1855 case IOCB_CMD_PREADV: 1856 ret = aio_read(&req->rw, iocb, true, compat); 1857 break; 1858 case IOCB_CMD_PWRITEV: 1859 ret = aio_write(&req->rw, iocb, true, compat); 1860 break; 1861 case IOCB_CMD_FSYNC: 1862 ret = aio_fsync(&req->fsync, iocb, false); 1863 break; 1864 case IOCB_CMD_FDSYNC: 1865 ret = aio_fsync(&req->fsync, iocb, true); 1866 break; 1867 case IOCB_CMD_POLL: 1868 ret = aio_poll(req, iocb); 1869 break; 1870 default: 1871 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 1872 ret = -EINVAL; 1873 break; 1874 } 1875 1876 /* 1877 * If ret is 0, we'd either done aio_complete() ourselves or have 1878 * arranged for that to be done asynchronously. Anything non-zero 1879 * means that we need to destroy req ourselves. 1880 */ 1881 if (ret) 1882 goto out_put_req; 1883 return 0; 1884 out_put_req: 1885 if (req->ki_eventfd) 1886 eventfd_ctx_put(req->ki_eventfd); 1887 iocb_put(req); 1888 out_put_reqs_available: 1889 put_reqs_available(ctx, 1); 1890 return ret; 1891 } 1892 1893 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1894 bool compat) 1895 { 1896 struct iocb iocb; 1897 1898 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) 1899 return -EFAULT; 1900 1901 return __io_submit_one(ctx, &iocb, user_iocb, compat); 1902 } 1903 1904 /* sys_io_submit: 1905 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 1906 * the number of iocbs queued. May return -EINVAL if the aio_context 1907 * specified by ctx_id is invalid, if nr is < 0, if the iocb at 1908 * *iocbpp[0] is not properly initialized, if the operation specified 1909 * is invalid for the file descriptor in the iocb. May fail with 1910 * -EFAULT if any of the data structures point to invalid data. May 1911 * fail with -EBADF if the file descriptor specified in the first 1912 * iocb is invalid. May fail with -EAGAIN if insufficient resources 1913 * are available to queue any iocbs. Will return 0 if nr is 0. 
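 *	A minimal illustrative submission (a sketch only, assuming ctx was
 *	obtained from io_setup() and fd/buf/len are valid):
 *
 *		struct iocb cb = {
 *			.aio_fildes	= fd,
 *			.aio_lio_opcode	= IOCB_CMD_PREAD,
 *			.aio_buf	= (__u64)(unsigned long)buf,
 *			.aio_nbytes	= len,
 *		};
 *		struct iocb *cbs[1] = { &cb };
 *		int n = syscall(__NR_io_submit, ctx, 1, cbs);
 *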
Will 1914 * fail with -ENOSYS if not implemented. 1915 */ 1916 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, 1917 struct iocb __user * __user *, iocbpp) 1918 { 1919 struct kioctx *ctx; 1920 long ret = 0; 1921 int i = 0; 1922 struct blk_plug plug; 1923 1924 if (unlikely(nr < 0)) 1925 return -EINVAL; 1926 1927 ctx = lookup_ioctx(ctx_id); 1928 if (unlikely(!ctx)) { 1929 pr_debug("EINVAL: invalid context id\n"); 1930 return -EINVAL; 1931 } 1932 1933 if (nr > ctx->nr_events) 1934 nr = ctx->nr_events; 1935 1936 if (nr > AIO_PLUG_THRESHOLD) 1937 blk_start_plug(&plug); 1938 for (i = 0; i < nr; i++) { 1939 struct iocb __user *user_iocb; 1940 1941 if (unlikely(get_user(user_iocb, iocbpp + i))) { 1942 ret = -EFAULT; 1943 break; 1944 } 1945 1946 ret = io_submit_one(ctx, user_iocb, false); 1947 if (ret) 1948 break; 1949 } 1950 if (nr > AIO_PLUG_THRESHOLD) 1951 blk_finish_plug(&plug); 1952 1953 percpu_ref_put(&ctx->users); 1954 return i ? i : ret; 1955 } 1956 1957 #ifdef CONFIG_COMPAT 1958 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, 1959 int, nr, compat_uptr_t __user *, iocbpp) 1960 { 1961 struct kioctx *ctx; 1962 long ret = 0; 1963 int i = 0; 1964 struct blk_plug plug; 1965 1966 if (unlikely(nr < 0)) 1967 return -EINVAL; 1968 1969 ctx = lookup_ioctx(ctx_id); 1970 if (unlikely(!ctx)) { 1971 pr_debug("EINVAL: invalid context id\n"); 1972 return -EINVAL; 1973 } 1974 1975 if (nr > ctx->nr_events) 1976 nr = ctx->nr_events; 1977 1978 if (nr > AIO_PLUG_THRESHOLD) 1979 blk_start_plug(&plug); 1980 for (i = 0; i < nr; i++) { 1981 compat_uptr_t user_iocb; 1982 1983 if (unlikely(get_user(user_iocb, iocbpp + i))) { 1984 ret = -EFAULT; 1985 break; 1986 } 1987 1988 ret = io_submit_one(ctx, compat_ptr(user_iocb), true); 1989 if (ret) 1990 break; 1991 } 1992 if (nr > AIO_PLUG_THRESHOLD) 1993 blk_finish_plug(&plug); 1994 1995 percpu_ref_put(&ctx->users); 1996 return i ? i : ret; 1997 } 1998 #endif 1999 2000 /* lookup_kiocb 2001 * Finds a given iocb for cancellation. 2002 */ 2003 static struct aio_kiocb * 2004 lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb) 2005 { 2006 struct aio_kiocb *kiocb; 2007 2008 assert_spin_locked(&ctx->ctx_lock); 2009 2010 /* TODO: use a hash or array, this sucks. */ 2011 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { 2012 if (kiocb->ki_user_iocb == iocb) 2013 return kiocb; 2014 } 2015 return NULL; 2016 } 2017 2018 /* sys_io_cancel: 2019 * Attempts to cancel an iocb previously passed to io_submit. If 2020 * the operation is successfully cancelled, the resulting event is 2021 * copied into the memory pointed to by result without being placed 2022 * into the completion queue and 0 is returned. May fail with 2023 * -EFAULT if any of the data structures pointed to are invalid. 2024 * May fail with -EINVAL if aio_context specified by ctx_id is 2025 * invalid. May fail with -EAGAIN if the iocb specified was not 2026 * cancelled. Will fail with -ENOSYS if not implemented. 
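 *	Note that only iocbs that registered a cancel callback (currently
 *	IOCB_CMD_POLL, plus users of kiocb_set_cancel_fn()) sit on the
 *	->active_reqs list and can actually be found here; anything else
 *	fails the lookup and returns -EINVAL.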
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct aio_kiocb *
lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
{
	struct aio_kiocb *kiocb;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_user_iocb == iocb)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	kiocb = lookup_kiocb(ctx, iocb);
	if (kiocb) {
		ret = kiocb->ki_cancel(&kiocb->rw);
		list_del_init(&kiocb->ki_list);
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS indicates
		 * that cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
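/*
 * Illustrative sketch (not part of this file): cancelling an in-flight
 * request from userspace.  io_cancel() only finds requests that were put on
 * ctx->active_reqs with a ->ki_cancel callback (IOCB_CMD_POLL does this;
 * ordinary file reads and writes typically do not), and even a "successful"
 * cancel is reported as -EINPROGRESS, with the io_event still delivered via
 * the ring.  'cb' is assumed to be the same struct iocb previously passed to
 * io_submit(), since lookup_kiocb() above compares the user pointer:
 *
 *	struct io_event ev;	// the result argument is ignored by the kernel
 *
 *	long ret = syscall(__NR_io_cancel, ctx, &cb, &ev);
 *	if (ret < 0 && errno == EINPROGRESS)
 *		;	// cancelled; completion will show up in io_getevents()
 *	else if (ret < 0 && errno == EINVAL)
 *		;	// request unknown to the context, or not cancellable
 */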
static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, or if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)

SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	sigset_t		ksigmask, sigsaved;
	struct timespec64	ts;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	restore_user_sigmask(ksig.sigmask, &sigsaved);
	if (signal_pending(current) && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
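/*
 * Illustrative sketch (not part of this file): io_pgetevents() installs the
 * caller's signal mask for the duration of the wait, like pselect(2)/ppoll(2),
 * closing the race between unblocking a signal and sleeping in io_getevents().
 * A userspace caller might wait as follows (raw syscall, headers and error
 * handling omitted; 'ctx' is a valid aio_context_t, and the sigsetsize of 8
 * bytes assumes the usual 64-signal kernel sigset_t):
 *
 *	struct io_event evs[8];
 *	struct __kernel_timespec to = { .tv_sec = 1 };
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);	// only SIGINT may interrupt the wait
 *
 *	struct {			// mirrors struct __aio_sigset above
 *		const sigset_t *sigmask;
 *		size_t sigsetsize;
 *	} usig = { &mask, 8 };
 *
 *	long n = syscall(__NR_io_pgetevents, ctx, 1, 8, evs, &to, &usig);
 */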
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	sigset_t		ksigmask, sigsaved;
	struct timespec64	ts;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	restore_user_sigmask(ksig.sigmask, &sigsaved);
	if (signal_pending(current) && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_sigset_t __user	*sigmask;
	compat_size_t		sigsetsize;
};

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { NULL, };
	sigset_t ksigmask, sigsaved;
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	restore_user_sigmask(ksig.sigmask, &sigsaved);
	if (signal_pending(current) && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { NULL, };
	sigset_t ksigmask, sigsaved;
	struct timespec64 t;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	restore_user_sigmask(ksig.sigmask, &sigsaved);
	if (signal_pending(current) && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif
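/*
 * Illustrative sketch (not part of this file): a userspace reap loop for the
 * io_getevents() family above.  min_nr = 1 blocks until at least one
 * completion arrives (or the relative timeout expires), nr bounds the batch,
 * and io_event.data carries back whatever was placed in iocb.aio_data at
 * submission time.  'ctx' is a valid aio_context_t and handle_completion()
 * is a hypothetical consumer; error handling beyond EINTR is omitted:
 *
 *	struct io_event evs[32];
 *
 *	for (;;) {
 *		long n = syscall(__NR_io_getevents, ctx, 1, 32, evs, NULL);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;	// interrupted by a signal
 *			break;
 *		}
 *		for (long i = 0; i < n; i++)
 *			handle_completion(evs[i].data, evs[i].res);
 *		// submit more work here, or io_destroy(ctx) and exit
 *	}
 */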