/*
 * An async IO implementation for Linux
 * Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 * Implements an efficient asynchronous io interface.
 *
 * Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
struct aio_batch_entry {
	struct hlist_node list;
	struct address_space *mapping;
};
mempool_t *abe_pool;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 * Creates the slab caches used by the aio routines, panic on
 * failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = create_workqueue("aio");
	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
	BUG_ON(!aio_wq || !abe_pool);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;	/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}

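/*
 * Worked sizing example (an illustrative sketch, not authoritative; it
 * assumes 4 KiB pages and the common 32-byte aio_ring header and 32-byte
 * io_event layouts):
 *
 *	io_setup(128, &ctx)  =>  nr_events = 128 + 2 = 130
 *	size = 32 + 130 * 32 = 4192 bytes    =>  nr_pages = 2
 *	nr_events (recomputed) = (2 * 4096 - 32) / 32 = 255
 *
 * so the ring capacity written to ring->nr is whatever fits in the pages
 * actually allocated, which may be larger than the requested nr_events.
 */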
/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 * Called when the last user of an aio context has gone away,
 * and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

#define get_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
		__put_ioctx(kioctx);					\
} while (0)

/* ioctx_alloc
 * Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 * Cancels all outstanding aio requests on an aio context.  Used
 * when the processes owning a context have all exited to encourage
 * the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 * Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 * Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 * Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not final fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(!fput_atomic(req->ki_filp))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else {
		req->ki_filp = NULL;
		really_put_req(ctx, req);
	}
	return 1;
}

/* aio_put_req
 * Returns true if this put was the last user of the kiocb,
 * false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id && !ctx->dead) {
			get_ioctx(ctx);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 * This is the core aio execution routine. It is
 * invoked both for initial i/o submission and
 * subsequent retries via the aio_kick_handler.
 * Expects to be invoked with iocb->ki_ctx->lock
 * already held. The lock is released and reacquired
 * as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 * Process all pending retries queued on the ioctx
 * run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;		/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

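/*
 * Illustrative sketch of the retry contract used by aio_run_iocb() above
 * (not part of this file; names are hypothetical): a retry-based operation
 * returns -EIOCBRETRY when it cannot make further progress and arranges for
 * kick_iocb() to be called, typically from a wait queue callback, once more
 * data is available:
 *
 *	static ssize_t my_aio_retry(struct kiocb *iocb)
 *	{
 *		if (!my_data_ready(iocb))	// hypothetical helper
 *			return -EIOCBRETRY;	// re-run later via kick_iocb()
 *		return my_copy_out(iocb);	// hypothetical helper
 *	}
 *
 * aio_run_iocb() then either completes the iocb or, if a kick arrived while
 * the retry method was running, re-queues it through aio_queue_work().
 */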
/*
 * aio_run_all_iocbs:
 * Process all pending retries queued on the ioctx
 * run list, and keep running them until the list
 * stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 * Work queue handler triggered to process pending
 * retries on an ioctx. Takes on the aio issuer's
 * mm context before running the iocbs, so that
 * copy_xxx_user operates on the issuer's address
 * space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 * Called typically from a wait queue callback context
 * to trigger a retry of the iocb.
 * The retry is usually executed by aio workqueue
 * threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 * Called when the io request on the given iocb is complete.
 * Returns true if this is the last user of the request.  The
 * only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
	struct aio_ring *ring;
	struct io_event *event;
	unsigned long flags;
	unsigned long tail;
	int ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

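/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * handler that queued an iocb earlier and returned -EIOCBQUEUED reports the
 * final byte count (or a negative errno) from its completion path by calling
 * aio_complete(), which posts the io_event above:
 *
 *	static void my_endio(struct my_request *rq)	// hypothetical type
 *	{
 *		aio_complete(rq->iocb, rq->nr_bytes_done, 0);
 *	}
 */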
/* aio_read_evt
 * Pull an event off of the ioctx's event ring.  Returns the number of
 * events fetched (0 or 1 ;-)
 * FIXME: make this use cmpxchg.
 * TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long start_jiffies = jiffies;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int ret;
	int i = 0;
	struct io_event ent;
	struct aio_timeout to;
	int retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts.
		 */
		event ++;
		i ++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racy check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1) ;

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters. The setting of ctx->dead must be seen
	 * by other CPUs at this point. Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 * Create an aio_context capable of receiving at least nr_events.
 * ctxp must not point to an aio_context that already exists, and
 * must be initialized to 0 prior to the call.  On successful
 * creation of the aio_context, *ctxp is filled in with the resulting
 * handle.  May fail with -EINVAL if *ctxp is not initialized,
 * if the specified nr_events exceeds internal limits.  May fail
 * with -EAGAIN if the specified nr_events exceeds the user's limit
 * of available events.  May fail with -ENOMEM if insufficient kernel
 * resources are available.  May fail with -EFAULT if an invalid
 * pointer is passed for ctxp.  Will fail with -ENOSYS if not
 * implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}

/* sys_io_destroy:
 * Destroy the aio_context specified.  May cancel any outstanding
 * AIOs and block on completion.  Will fail with -ENOSYS if not
 * implemented.  May fail with -EINVAL if the context pointed to
 * is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error.
	 */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 * Performs the initial checks and aio retry method
 * setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static void aio_batch_add(struct address_space *mapping,
			  struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos;
	unsigned bucket;

	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
		if (abe->mapping == mapping)
			return;
	}

	abe = mempool_alloc(abe_pool, GFP_KERNEL);

	/*
	 * we should be using igrab here, but
	 * we don't want to hammer on the global
	 * inode spinlock just to take an extra
	 * reference on a file that we must already
	 * have a reference to.
	 *
	 * When we're called, we always have a reference
	 * on the file, so we must always have a reference
	 * on the inode, so ihold() is safe here.
	 */
	ihold(mapping->host);
	abe->mapping = mapping;
	hlist_add_head(&abe->list, &batch_hash[bucket]);
	return;
}

static void aio_batch_free(struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos, *n;
	int i;

	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
			blk_run_address_space(abe->mapping);
			iput(abe->mapping->host);
			hlist_del(&abe->list);
			mempool_free(abe, abe_pool);
		}
	}
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct hlist_head *batch_hash,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx);		/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	if (req->ki_opcode == IOCB_CMD_PREAD ||
	    req->ki_opcode == IOCB_CMD_PREADV ||
	    req->ki_opcode == IOCB_CMD_PWRITE ||
	    req->ki_opcode == IOCB_CMD_PWRITEV)
		aio_batch_add(file->f_mapping, batch_hash);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i;
	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
		if (ret)
			break;
	}
	aio_batch_free(batch_hash);

	put_ioctx(ctx);
	return i ? i : ret;
}

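/*
 * Illustrative userspace sketch (not kernel code; error handling omitted):
 * a caller fills in a struct iocb from <linux/aio_abi.h> before passing its
 * address to io_submit().  The aio_resfd/IOCB_FLAG_RESFD pair is optional
 * and requests an eventfd notification on completion:
 *
 *	struct iocb cb = { 0 };
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;			// some open file descriptor
 *	cb.aio_buf        = (uintptr_t)buf;	// destination buffer
 *	cb.aio_nbytes     = sizeof(buf);
 *	cb.aio_offset     = 0;
 *	cb.aio_data       = (uintptr_t)&cb;	// echoed back in io_event.data
 *	// optionally: cb.aio_flags = IOCB_FLAG_RESFD; cb.aio_resfd = efd;
 */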
/* sys_io_submit:
 * Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 * the number of iocbs queued.  May return -EINVAL if the aio_context
 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
 * *iocbpp[0] is not properly initialized, if the operation specified
 * is invalid for the file descriptor in the iocb.  May fail with
 * -EFAULT if any of the data structures point to invalid data.  May
 * fail with -EBADF if the file descriptor specified in the first
 * iocb is invalid.  May fail with -EAGAIN if insufficient resources
 * are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 * fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 * Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 * Attempts to cancel an iocb previously passed to io_submit.  If
 * the operation is successfully cancelled, the resulting event is
 * copied into the memory pointed to by result without being placed
 * into the completion queue and 0 is returned.  May fail with
 * -EFAULT if any of the data structures pointed to are invalid.
 * May fail with -EINVAL if aio_context specified by ctx_id is
 * invalid.  May fail with -EAGAIN if the iocb specified was not
 * cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users ++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

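/*
 * Illustrative userspace sketch (not kernel code; raw syscall, error
 * handling omitted): cancelling a previously submitted iocb.  Many in-flight
 * requests have no ki_cancel method, so -EAGAIN is the common outcome and
 * the completion should then be reaped with io_getevents() instead:
 *
 *	struct io_event ev;
 *	if (syscall(SYS_io_cancel, ctx, &cb, &ev) == 0)
 *		;	// cancelled: the result was copied into ev
 *	else
 *		;	// e.g. errno == EAGAIN: wait for normal completion
 */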
/* io_getevents:
 * Attempts to read at least min_nr events and up to nr events from
 * the completion queue for the aio_context specified by ctx_id. If
 * it succeeds, the number of read events is returned. May fail with
 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 * out of range, if timeout is out of range.  May fail with -EFAULT
 * if any of the memory specified is invalid.  May return 0 or
 * < min_nr if the timeout specified by timeout has elapsed
 * before sufficient events are available, where timeout == NULL
 * specifies an infinite timeout. Note that the timeout pointed to by
 * timeout is relative and will be updated if not NULL and the
 * operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
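/*
 * Illustrative end-to-end userspace sketch (not kernel code; raw syscalls
 * via syscall(2), error handling omitted).  It exercises the syscalls
 * defined above: set up a context, submit one read, wait for its completion
 * event, and tear the context down:
 *
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	aio_context_t ctx = 0;			// must start out zeroed
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *	char buf[4096];
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes = fd;			// some open, readable fd
 *	cb.aio_buf = (uintptr_t)buf;
 *	cb.aio_nbytes = sizeof(buf);
 *
 *	syscall(SYS_io_setup, 128, &ctx);
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 *	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL); // ev.res = bytes read
 *	syscall(SYS_io_destroy, ctx);
 */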