/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#include "internal.h"

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head	rcu;
	unsigned	nr;
	struct kioctx	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct work_struct	free_work;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
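		 *
		 * For example, with req_batch = 32, get_reqs_available()
		 * takes 32 slots from here at once and then serves the next
		 * 32 allocations from the local kioctx_cpu counter without
		 * touching this shared cacheline.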
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct file *file;
	struct path path;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	file->private_data = ctx;
	return file;
}

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
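 *
 *	(Both caches are created with SLAB_PANIC, so failure to create
 *	either cache panics at boot rather than returning an error.)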
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);

		/* Prevent further access to the kioctx from migratepages */
		spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
		aio_ring_file->f_inode->i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_pages; i++) {
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		put_page(ctx->ring_pages[i]);
	}

	put_aio_ring_file(ctx);

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

static int aio_set_page_dirty(struct page *page)
{
	return 0;
}

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			   struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	int rc;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	put_page(old);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
	if (rc != MIGRATEPAGE_SUCCESS) {
		get_page(old);
		return rc;
	}

	get_page(new);

	/* We can potentially race against kioctx teardown here.  Use the
	 * address_space's private data lock to protect the mapping's
	 * private_data.
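	 *
	 * Taking ctx->completion_lock below also keeps aio_complete() from
	 * writing an event into the old page while its contents are being
	 * copied over to the new one.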
	 */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (ctx) {
		pgoff_t idx;
		spin_lock_irqsave(&ctx->completion_lock, flags);
		migrate_page_copy(new, old);
		idx = old->index;
		if (idx < (pgoff_t)ctx->nr_pages)
			ctx->ring_pages[idx] = new;
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else
		rc = -EBUSY;
	spin_unlock(&mapping->private_lock);

	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = aio_set_page_dirty,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, populate;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -EAGAIN;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		SetPageDirty(page);
		unlock_page(page);
	}
	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages)
			return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_POPULATE, 0, &populate);
	if (IS_ERR((void *)ctx->mmap_base)) {
		up_write(&mm->mmap_sem);
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	/* We must do this while still holding mmap_sem for write, as we
	 * need to be protected against userspace attempting to mremap()
	 * or munmap() the ring buffer.
	 */
	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
				       1, 0, ctx->ring_pages, NULL);

	/* Dropping the reference here is safe as the page cache will hold
	 * onto the pages for us.  It is also required so that page migration
	 * can unmap the pages and get the right reference count.
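	 *
	 * (If one of these pages is migrated later, aio_migratepage()
	 * updates the matching ctx->ring_pages[] slot under
	 * completion_lock, so the cached pointers stay valid.)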
	 */
	for (i = 0; i < ctx->nr_pages; i++)
		put_page(ctx->ring_pages[i]);

	up_write(&mm->mmap_sem);

	if (unlikely(ctx->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
{
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb);
}

static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
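 *
 * In-flight kiocbs still hold references on ctx->reqs, so the actual
 * teardown is deferred until free_ioctx_reqs() fires and schedules
 * free_ioctx().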
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(ctx, req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
					rcu_read_unlock();
					spin_unlock(&mm->ioctx_lock);

					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;

		rcu_read_unlock();
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		rcu_read_lock();
		old = rcu_dereference(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
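	 *
	 * (For example, roughly: on a 4-CPU machine, io_setup(128) sizes
	 * the ring for 256 events, so the caller still sees its 128 slots
	 * even when the percpu caches are holding their maximum.)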
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	if (percpu_ref_init(&ctx->users, free_ioctx_users))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
		goto err;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	if (aio_setup_ring(ctx) < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err:
	free_percpu(ctx->cpu);
	free_percpu(ctx->reqs.pcpu_count);
	free_percpu(ctx->users.pcpu_count);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
{
	if (!atomic_xchg(&ctx->dead, 1)) {
		struct kioctx_table *table;

		spin_lock(&mm->ioctx_lock);
		rcu_read_lock();
		table = rcu_dereference(mm->ioctx_table);

		WARN_ON(ctx != table->table[ctx->id]);
		table->table[ctx->id] = NULL;
		rcu_read_unlock();
		spin_unlock(&mm->ioctx_lock);

		/* percpu_ref_kill() will do the necessary call_rcu() */
		wake_up_all(&ctx->wait);

		/*
		 * It'd be more correct to do this in free_ioctx(), after all
		 * the outstanding kiocbs have finished - but by then io_destroy
		 * has already returned, so io_setup() could potentially return
		 * -EAGAIN with no ioctxs actually in use (as far as userspace
		 * could tell).
		 */
		aio_nr_sub(ctx->max_reqs);

		if (ctx->mmap_size)
			vm_munmap(ctx->mmap_base, ctx->mmap_size);

		percpu_ref_kill(&ctx->users);
	}
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *req)
{
	while (!req->ki_ctx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (req->ki_ctx)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return req->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table;
	struct kioctx *ctx;
	unsigned i = 0;

	while (1) {
		rcu_read_lock();
		table = rcu_dereference(mm->ioctx_table);

		do {
			if (!table || i >= table->nr) {
				rcu_read_unlock();
				rcu_assign_pointer(mm->ioctx_table, NULL);
				if (table)
					kfree(table);
				return;
			}

			ctx = table->table[i++];
		} while (!ctx);

		rcu_read_unlock();

		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 */
		ctx->mmap_size = 0;

		kill_ioctx(mm, ctx);
	}
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	kcpu->reqs_available += nr;
	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	preempt_enable();
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	preempt_enable();
	return ret;
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (!get_reqs_available(ctx))
		return NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}

static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = table->table[id];
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned long	flags;
	unsigned	tail, pos;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		iocb->ki_user_data = res;
		smp_wmb();
		iocb->ki_ctx = ERR_PTR(-EXDEV);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_events
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	mutex_lock(&ctx->ring_lock);

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
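		/*
		 * avail counts the events contiguous up to tail, or up to
		 * the end of the ring on wraparound; the clamps below also
		 * keep a single copy_to_user() within one page.
		 */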
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			      ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);

	put_reqs_available(ctx, ret);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	wait_event_interruptible_hrtimeout(ctx->wait,
		aio_read_events(ctx, min_nr, nr, event, &ret), until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
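 *
 *	Illustrative userspace call (not part of this file); there is no
 *	glibc wrapper for these syscalls, so go through syscall(2):
 *
 *		aio_context_t ctx = 0;
 *		if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *			perror("io_setup");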
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		kill_ioctx(current->mm, ioctx);
		percpu_ref_put(&ioctx->users);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);

static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
				     int rw, char __user *buf,
				     unsigned long *nr_segs,
				     struct iovec **iovec,
				     bool compat)
{
	ssize_t ret;

	*nr_segs = kiocb->ki_nbytes;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(rw,
				(struct compat_iovec __user *)buf,
				*nr_segs, 1, *iovec, iovec);
	else
#endif
		ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)buf,
				*nr_segs, 1, *iovec, iovec);
	if (ret < 0)
		return ret;

	/* ki_nbytes now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	return 0;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
				       int rw, char __user *buf,
				       unsigned long *nr_segs,
				       struct iovec *iovec)
{
	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
		return -EFAULT;

	iovec->iov_base = buf;
	iovec->iov_len = kiocb->ki_nbytes;
	*nr_segs = 1;
	return 0;
}

/*
 * aio_run_iocb:
 *	Performs the initial checks and io submission for the kiocb,
 *	dispatching on the iocb's opcode.
 */
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
			    char __user *buf, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	unsigned long nr_segs;
	int rw;
	fmode_t mode;
	aio_rw_op *rw_op;
	struct iovec inline_vec, *iovec = &inline_vec;

	switch (opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		rw_op	= file->f_op->aio_read;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		rw_op	= file->f_op->aio_write;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!rw_op)
			return -EINVAL;

		ret = (opcode == IOCB_CMD_PREADV ||
		       opcode == IOCB_CMD_PWRITEV)
			? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
						&iovec, compat)
			: aio_setup_single_vector(req, rw, buf, &nr_segs,
						  iovec);
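		/*
		 * On success ki_nbytes now counts bytes, and in the
		 * vectored case iovec may point at a kmalloc'd array that
		 * has to be freed before returning (see the cleanup paths
		 * below).
		 */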
		if (ret)
			return ret;

		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
		if (ret < 0) {
			if (iovec != &inline_vec)
				kfree(iovec);
			return ret;
		}

		req->ki_nbytes = ret;

		/* XXX: move/kill - rw_verify_area()? */
		/* This matches the pread()/pwrite() logic */
		if (req->ki_pos < 0) {
			ret = -EINVAL;
			break;
		}

		if (rw == WRITE)
			file_start_write(file);

		ret = rw_op(req, iovec, nr_segs, req->ki_pos);

		if (rw == WRITE)
			file_end_write(file);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (iovec != &inline_vec)
		kfree(iovec);

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
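		 *
		 * Illustrative userspace setup (not part of this file):
		 *
		 *	iocb.aio_flags |= IOCB_FLAG_RESFD;
		 *	iocb.aio_resfd  = eventfd(0, 0);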
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;
	req->ki_nbytes = iocb->aio_nbytes;

	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
			   (char __user *)(unsigned long)iocb->aio_buf,
			   compat);
	if (ret)
		goto out_put_req;

	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	kiocb_free(req);
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user * __user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks.
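	 * (As it stands, every io_cancel() walks the entire active_reqs
	 * list under ctx_lock.)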
	 */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);

		if (kiocb->ki_obj.user == iocb)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(ctx, kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * that cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		percpu_ref_put(&ioctx->users);
	}
	return ret;
}