xref: /openbmc/linux/fs/aio.c (revision e23feb16)
1 /*
2  *	An async IO implementation for Linux
3  *	Written by Benjamin LaHaise <bcrl@kvack.org>
4  *
5  *	Implements an efficient asynchronous io interface.
6  *
7  *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
8  *
9  *	See ../COPYING for licensing terms.
10  */
11 #define pr_fmt(fmt) "%s: " fmt, __func__
12 
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/errno.h>
16 #include <linux/time.h>
17 #include <linux/aio_abi.h>
18 #include <linux/export.h>
19 #include <linux/syscalls.h>
20 #include <linux/backing-dev.h>
21 #include <linux/uio.h>
22 
23 #include <linux/sched.h>
24 #include <linux/fs.h>
25 #include <linux/file.h>
26 #include <linux/mm.h>
27 #include <linux/mman.h>
28 #include <linux/mmu_context.h>
29 #include <linux/percpu.h>
30 #include <linux/slab.h>
31 #include <linux/timer.h>
32 #include <linux/aio.h>
33 #include <linux/highmem.h>
34 #include <linux/workqueue.h>
35 #include <linux/security.h>
36 #include <linux/eventfd.h>
37 #include <linux/blkdev.h>
38 #include <linux/compat.h>
39 #include <linux/anon_inodes.h>
40 #include <linux/migrate.h>
41 #include <linux/ramfs.h>
42 #include <linux/percpu-refcount.h>
43 
44 #include <asm/kmap_types.h>
45 #include <asm/uaccess.h>
46 
47 #include "internal.h"
48 
49 #define AIO_RING_MAGIC			0xa10a10a1
50 #define AIO_RING_COMPAT_FEATURES	1
51 #define AIO_RING_INCOMPAT_FEATURES	0
52 struct aio_ring {
53 	unsigned	id;	/* kernel internal index number */
54 	unsigned	nr;	/* number of io_events */
55 	unsigned	head;
56 	unsigned	tail;
57 
58 	unsigned	magic;
59 	unsigned	compat_features;
60 	unsigned	incompat_features;
61 	unsigned	header_length;	/* size of aio_ring */
62 
63 
64 	struct io_event		io_events[0];
65 }; /* 32 bytes + ring size */
66 
67 #define AIO_RING_PAGES	8
68 
69 struct kioctx_table {
70 	struct rcu_head	rcu;
71 	unsigned	nr;
72 	struct kioctx	*table[];
73 };
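/*
 * One of these hangs off each mm_struct.  ioctx_add_table() stores the
 * slot index in both ctx->id and the user-visible ring->id, which is
 * how lookup_ioctx() maps an aio_context_t back to its kioctx.
 */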
74 
75 struct kioctx_cpu {
76 	unsigned		reqs_available;
77 };
78 
79 struct kioctx {
80 	struct percpu_ref	users;
81 	atomic_t		dead;
82 
83 	unsigned long		user_id;
84 
85 	struct __percpu kioctx_cpu *cpu;
86 
87 	/*
88 	 * For percpu reqs_available, number of slots we move to/from global
89 	 * counter at a time:
90 	 */
91 	unsigned		req_batch;
92 	/*
93 	 * This is what userspace passed to io_setup(), it's not used for
94 	 * anything but counting against the global max_reqs quota.
95 	 *
96 	 * The real limit is nr_events - 1, which will be larger (see
97 	 * aio_setup_ring())
98 	 */
99 	unsigned		max_reqs;
100 
101 	/* Size of ringbuffer, in units of struct io_event */
102 	unsigned		nr_events;
103 
104 	unsigned long		mmap_base;
105 	unsigned long		mmap_size;
106 
107 	struct page		**ring_pages;
108 	long			nr_pages;
109 
110 	struct rcu_head		rcu_head;
111 	struct work_struct	free_work;
112 
113 	struct {
114 		/*
115 		 * This counts the number of available slots in the ringbuffer,
116 		 * so we avoid overflowing it: it's decremented (if positive)
117 		 * when allocating a kiocb and incremented when the resulting
118 		 * io_event is pulled off the ringbuffer.
119 		 *
120 		 * We batch accesses to it with a percpu version.
121 		 */
122 		atomic_t	reqs_available;
123 	} ____cacheline_aligned_in_smp;
124 
125 	struct {
126 		spinlock_t	ctx_lock;
127 		struct list_head active_reqs;	/* used for cancellation */
128 	} ____cacheline_aligned_in_smp;
129 
130 	struct {
131 		struct mutex	ring_lock;
132 		wait_queue_head_t wait;
133 	} ____cacheline_aligned_in_smp;
134 
135 	struct {
136 		unsigned	tail;
137 		spinlock_t	completion_lock;
138 	} ____cacheline_aligned_in_smp;
139 
140 	struct page		*internal_pages[AIO_RING_PAGES];
141 	struct file		*aio_ring_file;
142 
143 	unsigned		id;
144 };
145 
146 /*------ sysctl variables----*/
147 static DEFINE_SPINLOCK(aio_nr_lock);
148 unsigned long aio_nr;		/* current system wide number of aio requests */
149 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
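/*
 * Both counters are exposed to userspace as /proc/sys/fs/aio-nr and
 * /proc/sys/fs/aio-max-nr (the sysctl table entries live in
 * kernel/sysctl.c).
 */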
150 /*----end sysctl variables---*/
151 
152 static struct kmem_cache	*kiocb_cachep;
153 static struct kmem_cache	*kioctx_cachep;
154 
155 /* aio_setup
156  *	Creates the slab caches used by the aio routines; panics on
157  *	failure, as this is done early in the boot sequence.
158  */
159 static int __init aio_setup(void)
160 {
161 	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
162 	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
163 
164 	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
165 
166 	return 0;
167 }
168 __initcall(aio_setup);
169 
170 static void put_aio_ring_file(struct kioctx *ctx)
171 {
172 	struct file *aio_ring_file = ctx->aio_ring_file;
173 	if (aio_ring_file) {
174 		truncate_setsize(aio_ring_file->f_inode, 0);
175 
176 		/* Prevent further access to the kioctx from migratepages */
177 		spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
178 		aio_ring_file->f_inode->i_mapping->private_data = NULL;
179 		ctx->aio_ring_file = NULL;
180 		spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
181 
182 		fput(aio_ring_file);
183 	}
184 }
185 
186 static void aio_free_ring(struct kioctx *ctx)
187 {
188 	int i;
189 
190 	for (i = 0; i < ctx->nr_pages; i++) {
191 		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
192 				page_count(ctx->ring_pages[i]));
193 		put_page(ctx->ring_pages[i]);
194 	}
195 
196 	put_aio_ring_file(ctx);
197 
198 	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
199 		kfree(ctx->ring_pages);
200 }
201 
202 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
203 {
204 	vma->vm_ops = &generic_file_vm_ops;
205 	return 0;
206 }
207 
208 static const struct file_operations aio_ring_fops = {
209 	.mmap = aio_ring_mmap,
210 };
211 
212 static int aio_set_page_dirty(struct page *page)
213 {
214 	return 0;
215 }
216 
217 #if IS_ENABLED(CONFIG_MIGRATION)
218 static int aio_migratepage(struct address_space *mapping, struct page *new,
219 			struct page *old, enum migrate_mode mode)
220 {
221 	struct kioctx *ctx;
222 	unsigned long flags;
223 	int rc;
224 
225 	/* Writeback must be complete */
226 	BUG_ON(PageWriteback(old));
227 	put_page(old);
228 
229 	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
230 	if (rc != MIGRATEPAGE_SUCCESS) {
231 		get_page(old);
232 		return rc;
233 	}
234 
235 	get_page(new);
236 
237 	/* We can potentially race against kioctx teardown here.  Use the
238 	 * address_space's private data lock to protect the mapping's
239 	 * private_data.
240 	 */
241 	spin_lock(&mapping->private_lock);
242 	ctx = mapping->private_data;
243 	if (ctx) {
244 		pgoff_t idx;
245 		spin_lock_irqsave(&ctx->completion_lock, flags);
246 		migrate_page_copy(new, old);
247 		idx = old->index;
248 		if (idx < (pgoff_t)ctx->nr_pages)
249 			ctx->ring_pages[idx] = new;
250 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
251 	} else
252 		rc = -EBUSY;
253 	spin_unlock(&mapping->private_lock);
254 
255 	return rc;
256 }
257 #endif
258 
259 static const struct address_space_operations aio_ctx_aops = {
260 	.set_page_dirty = aio_set_page_dirty,
261 #if IS_ENABLED(CONFIG_MIGRATION)
262 	.migratepage	= aio_migratepage,
263 #endif
264 };
265 
266 static int aio_setup_ring(struct kioctx *ctx)
267 {
268 	struct aio_ring *ring;
269 	unsigned nr_events = ctx->max_reqs;
270 	struct mm_struct *mm = current->mm;
271 	unsigned long size, populate;
272 	int nr_pages;
273 	int i;
274 	struct file *file;
275 
276 	/* Compensate for the ring buffer's head/tail overlap entry */
277 	nr_events += 2;	/* 1 is required, 2 for good luck */
278 
279 	size = sizeof(struct aio_ring);
280 	size += sizeof(struct io_event) * nr_events;
281 
282 	nr_pages = PFN_UP(size);
283 	if (nr_pages < 0)
284 		return -EINVAL;
285 
286 	file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
287 	if (IS_ERR(file)) {
288 		ctx->aio_ring_file = NULL;
289 		return -EAGAIN;
290 	}
291 
292 	file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
293 	file->f_inode->i_mapping->private_data = ctx;
294 	file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;
295 
296 	for (i = 0; i < nr_pages; i++) {
297 		struct page *page;
298 		page = find_or_create_page(file->f_inode->i_mapping,
299 					   i, GFP_HIGHUSER | __GFP_ZERO);
300 		if (!page)
301 			break;
302 		pr_debug("pid(%d) page[%d]->count=%d\n",
303 			 current->pid, i, page_count(page));
304 		SetPageUptodate(page);
305 		SetPageDirty(page);
306 		unlock_page(page);
307 	}
308 	ctx->aio_ring_file = file;
309 	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
310 			/ sizeof(struct io_event);
311 
312 	ctx->ring_pages = ctx->internal_pages;
313 	if (nr_pages > AIO_RING_PAGES) {
314 		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
315 					  GFP_KERNEL);
316 		if (!ctx->ring_pages)
317 			return -ENOMEM;
318 	}
319 
320 	ctx->mmap_size = nr_pages * PAGE_SIZE;
321 	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
322 
323 	down_write(&mm->mmap_sem);
324 	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
325 				       PROT_READ | PROT_WRITE,
326 				       MAP_SHARED | MAP_POPULATE, 0, &populate);
327 	if (IS_ERR((void *)ctx->mmap_base)) {
328 		up_write(&mm->mmap_sem);
329 		ctx->mmap_size = 0;
330 		aio_free_ring(ctx);
331 		return -EAGAIN;
332 	}
333 
334 	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
335 
336 	/* We must do this while still holding mmap_sem for write, as we
337 	 * need to be protected against userspace attempting to mremap()
338 	 * or munmap() the ring buffer.
339 	 */
340 	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
341 				       1, 0, ctx->ring_pages, NULL);
342 
343 	/* Dropping the reference here is safe as the page cache will hold
344 	 * onto the pages for us.  It is also required so that page migration
345 	 * can unmap the pages and get the right reference count.
346 	 */
347 	for (i = 0; i < ctx->nr_pages; i++)
348 		put_page(ctx->ring_pages[i]);
349 
350 	up_write(&mm->mmap_sem);
351 
352 	if (unlikely(ctx->nr_pages != nr_pages)) {
353 		aio_free_ring(ctx);
354 		return -EAGAIN;
355 	}
356 
357 	ctx->user_id = ctx->mmap_base;
358 	ctx->nr_events = nr_events; /* trusted copy */
359 
360 	ring = kmap_atomic(ctx->ring_pages[0]);
361 	ring->nr = nr_events;	/* user copy */
362 	ring->id = ~0U;
363 	ring->head = ring->tail = 0;
364 	ring->magic = AIO_RING_MAGIC;
365 	ring->compat_features = AIO_RING_COMPAT_FEATURES;
366 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
367 	ring->header_length = sizeof(struct aio_ring);
368 	kunmap_atomic(ring);
369 	flush_dcache_page(ctx->ring_pages[0]);
370 
371 	return 0;
372 }
373 
374 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
375 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
376 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
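
/*
 * Worked example of the layout, assuming 4096-byte pages, the 32-byte
 * struct aio_ring header above and a 32-byte struct io_event:
 *
 *	AIO_EVENTS_PER_PAGE	= 4096 / 32		= 128
 *	AIO_EVENTS_FIRST_PAGE	= (4096 - 32) / 32	= 127
 *	AIO_EVENTS_OFFSET	= 128 - 127		= 1
 *
 * Event slot i thus lives in page (i + AIO_EVENTS_OFFSET) / 128 of
 * ring_pages[], at index (i + AIO_EVENTS_OFFSET) % 128 within that page,
 * which is the arithmetic used by aio_complete() and
 * aio_read_events_ring() below.  aio_setup_ring() sizes generously: a
 * request for 128 events becomes 130, needing two pages, and the usable
 * ring is then (2 * 4096 - 32) / 32 = 255 events.
 */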
377 
378 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
379 {
380 	struct kioctx *ctx = req->ki_ctx;
381 	unsigned long flags;
382 
383 	spin_lock_irqsave(&ctx->ctx_lock, flags);
384 
385 	if (!req->ki_list.next)
386 		list_add(&req->ki_list, &ctx->active_reqs);
387 
388 	req->ki_cancel = cancel;
389 
390 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
391 }
392 EXPORT_SYMBOL(kiocb_set_cancel_fn);
393 
394 static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
395 {
396 	kiocb_cancel_fn *old, *cancel;
397 
398 	/*
399 	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
400 	 * actually has a cancel function, hence the cmpxchg()
401 	 */
402 
403 	cancel = ACCESS_ONCE(kiocb->ki_cancel);
404 	do {
405 		if (!cancel || cancel == KIOCB_CANCELLED)
406 			return -EINVAL;
407 
408 		old = cancel;
409 		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
410 	} while (cancel != old);
411 
412 	return cancel(kiocb);
413 }
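/*
 * Illustrative only (hypothetical driver code, not part of this file):
 * a driver that can abort an in-flight request registers a callback
 * from its aio_read/aio_write method, e.g.:
 *
 *	static int my_cancel(struct kiocb *iocb)
 *	{
 *		my_abort_request(iocb->private);	(hypothetical helper)
 *		aio_complete(iocb, -EINTR, 0);
 *		return 0;
 *	}
 *	...
 *	kiocb_set_cancel_fn(iocb, my_cancel);
 *
 * kiocb_cancel() swaps ki_cancel to KIOCB_CANCELLED with cmpxchg(), so
 * the callback runs at most once even if io_cancel() races with context
 * teardown in free_ioctx().
 */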
414 
415 static void free_ioctx_rcu(struct rcu_head *head)
416 {
417 	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
418 
419 	free_percpu(ctx->cpu);
420 	kmem_cache_free(kioctx_cachep, ctx);
421 }
422 
423 /*
424  * When this function runs, the kioctx has been removed from the "hash table"
425  * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
426  * now it's safe to cancel any that need to be.
427  */
428 static void free_ioctx(struct work_struct *work)
429 {
430 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
431 	struct aio_ring *ring;
432 	struct kiocb *req;
433 	unsigned cpu, avail;
434 	DEFINE_WAIT(wait);
435 
436 	spin_lock_irq(&ctx->ctx_lock);
437 
438 	while (!list_empty(&ctx->active_reqs)) {
439 		req = list_first_entry(&ctx->active_reqs,
440 				       struct kiocb, ki_list);
441 
442 		list_del_init(&req->ki_list);
443 		kiocb_cancel(ctx, req);
444 	}
445 
446 	spin_unlock_irq(&ctx->ctx_lock);
447 
448 	for_each_possible_cpu(cpu) {
449 		struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);
450 
451 		atomic_add(kcpu->reqs_available, &ctx->reqs_available);
452 		kcpu->reqs_available = 0;
453 	}
454 
455 	while (1) {
456 		prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
457 
458 		ring = kmap_atomic(ctx->ring_pages[0]);
459 		avail = (ring->head <= ring->tail)
460 			 ? ring->tail - ring->head
461 			 : ctx->nr_events - ring->head + ring->tail;
462 
463 		atomic_add(avail, &ctx->reqs_available);
464 		ring->head = ring->tail;
465 		kunmap_atomic(ring);
466 
467 		if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
468 			break;
469 
470 		schedule();
471 	}
472 	finish_wait(&ctx->wait, &wait);
473 
474 	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
475 
476 	aio_free_ring(ctx);
477 
478 	pr_debug("freeing %p\n", ctx);
479 
480 	/*
481 	 * Here the call_rcu() is between the wait_event() for reqs_active to
482 	 * hit 0, and freeing the ioctx.
483 	 *
484 	 * aio_complete() decrements reqs_active, but it still has to touch
485 	 * the ioctx afterwards to issue a wakeup, so we use RCU.
486 	 */
487 	call_rcu(&ctx->rcu_head, free_ioctx_rcu);
488 }
489 
490 static void free_ioctx_ref(struct percpu_ref *ref)
491 {
492 	struct kioctx *ctx = container_of(ref, struct kioctx, users);
493 
494 	INIT_WORK(&ctx->free_work, free_ioctx);
495 	schedule_work(&ctx->free_work);
496 }
497 
498 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
499 {
500 	unsigned i, new_nr;
501 	struct kioctx_table *table, *old;
502 	struct aio_ring *ring;
503 
504 	spin_lock(&mm->ioctx_lock);
505 	rcu_read_lock();
506 	table = rcu_dereference(mm->ioctx_table);
507 
508 	while (1) {
509 		if (table)
510 			for (i = 0; i < table->nr; i++)
511 				if (!table->table[i]) {
512 					ctx->id = i;
513 					table->table[i] = ctx;
514 					rcu_read_unlock();
515 					spin_unlock(&mm->ioctx_lock);
516 
517 					ring = kmap_atomic(ctx->ring_pages[0]);
518 					ring->id = ctx->id;
519 					kunmap_atomic(ring);
520 					return 0;
521 				}
522 
523 		new_nr = (table ? table->nr : 1) * 4;
524 
525 		rcu_read_unlock();
526 		spin_unlock(&mm->ioctx_lock);
527 
528 		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
529 				new_nr, GFP_KERNEL);
530 		if (!table)
531 			return -ENOMEM;
532 
533 		table->nr = new_nr;
534 
535 		spin_lock(&mm->ioctx_lock);
536 		rcu_read_lock();
537 		old = rcu_dereference(mm->ioctx_table);
538 
539 		if (!old) {
540 			rcu_assign_pointer(mm->ioctx_table, table);
541 		} else if (table->nr > old->nr) {
542 			memcpy(table->table, old->table,
543 			       old->nr * sizeof(struct kioctx *));
544 
545 			rcu_assign_pointer(mm->ioctx_table, table);
546 			kfree_rcu(old, rcu);
547 		} else {
548 			kfree(table);
549 			table = old;
550 		}
551 	}
552 }
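/*
 * The table only ever grows, by a factor of four each time: the first
 * allocation holds 4 kioctx pointers, then 16, 64, and so on.  A stale
 * table is released with kfree_rcu() so that lookup_ioctx() can keep
 * dereferencing the old pointer under rcu_read_lock().
 */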
553 
554 /* ioctx_alloc
555  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
556  */
557 static struct kioctx *ioctx_alloc(unsigned nr_events)
558 {
559 	struct mm_struct *mm = current->mm;
560 	struct kioctx *ctx;
561 	int err = -ENOMEM;
562 
563 	/*
564 	 * We keep track of the number of available ringbuffer slots to prevent
565 	 * overflow (reqs_available), using percpu counters to batch updates.
566 	 *
567 	 * Since up to half the slots might be parked on other CPUs' percpu
568 	 * counters and hence unavailable, double nr_events so userspace sees
569 	 * the number it expected; additionally, we move req_batch slots to/from
570 	 * the percpu counters at a time, so make sure that isn't 0:
571 	 */
572 	nr_events = max(nr_events, num_possible_cpus() * 4);
573 	nr_events *= 2;
574 
575 	/* Prevent overflows */
576 	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
577 	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
578 		pr_debug("ENOMEM: nr_events too high\n");
579 		return ERR_PTR(-EINVAL);
580 	}
581 
582 	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
583 		return ERR_PTR(-EAGAIN);
584 
585 	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
586 	if (!ctx)
587 		return ERR_PTR(-ENOMEM);
588 
589 	ctx->max_reqs = nr_events;
590 
591 	if (percpu_ref_init(&ctx->users, free_ioctx_ref))
592 		goto out_freectx;
593 
594 	spin_lock_init(&ctx->ctx_lock);
595 	spin_lock_init(&ctx->completion_lock);
596 	mutex_init(&ctx->ring_lock);
597 	init_waitqueue_head(&ctx->wait);
598 
599 	INIT_LIST_HEAD(&ctx->active_reqs);
600 
601 	ctx->cpu = alloc_percpu(struct kioctx_cpu);
602 	if (!ctx->cpu)
603 		goto out_freeref;
604 
605 	if (aio_setup_ring(ctx) < 0)
606 		goto out_freepcpu;
607 
608 	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
609 	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
610 	if (ctx->req_batch < 1)
611 		ctx->req_batch = 1;
612 
613 	/* limit the number of system wide aios */
614 	spin_lock(&aio_nr_lock);
615 	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
616 	    aio_nr + nr_events < aio_nr) {
617 		spin_unlock(&aio_nr_lock);
618 		goto out_cleanup;
619 	}
620 	aio_nr += ctx->max_reqs;
621 	spin_unlock(&aio_nr_lock);
622 
623 	percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
624 
625 	err = ioctx_add_table(ctx, mm);
626 	if (err)
627 		goto out_cleanup_put;
628 
629 	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
630 		 ctx, ctx->user_id, mm, ctx->nr_events);
631 	return ctx;
632 
633 out_cleanup_put:
634 	percpu_ref_put(&ctx->users);
635 out_cleanup:
636 	err = -EAGAIN;
637 	aio_free_ring(ctx);
638 out_freepcpu:
639 	free_percpu(ctx->cpu);
640 out_freeref:
641 	free_percpu(ctx->users.pcpu_count);
642 out_freectx:
643 	put_aio_ring_file(ctx);
644 	kmem_cache_free(kioctx_cachep, ctx);
645 	pr_debug("error allocating ioctx %d\n", err);
646 	return ERR_PTR(err);
647 }
648 
649 /* kill_ioctx
650  *	Cancels all outstanding aio requests on an aio context.  Used
651  *	when the processes owning a context have all exited to encourage
652  *	the rapid destruction of the kioctx.
653  */
654 static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
655 {
656 	if (!atomic_xchg(&ctx->dead, 1)) {
657 		struct kioctx_table *table;
658 
659 		spin_lock(&mm->ioctx_lock);
660 		rcu_read_lock();
661 		table = rcu_dereference(mm->ioctx_table);
662 
663 		WARN_ON(ctx != table->table[ctx->id]);
664 		table->table[ctx->id] = NULL;
665 		rcu_read_unlock();
666 		spin_unlock(&mm->ioctx_lock);
667 
668 		/* percpu_ref_kill() will do the necessary call_rcu() */
669 		wake_up_all(&ctx->wait);
670 
671 		/*
672 		 * It'd be more correct to do this in free_ioctx(), after all
673 		 * the outstanding kiocbs have finished - but by then io_destroy
674 		 * has already returned, so io_setup() could potentially return
675 		 * -EAGAIN with no ioctxs actually in use (as far as userspace
676 		 *  could tell).
677 		 */
678 		spin_lock(&aio_nr_lock);
679 		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
680 		aio_nr -= ctx->max_reqs;
681 		spin_unlock(&aio_nr_lock);
682 
683 		if (ctx->mmap_size)
684 			vm_munmap(ctx->mmap_base, ctx->mmap_size);
685 
686 		percpu_ref_kill(&ctx->users);
687 	}
688 }
689 
690 /* wait_on_sync_kiocb:
691  *	Waits on the given sync kiocb to complete.
692  */
693 ssize_t wait_on_sync_kiocb(struct kiocb *req)
694 {
695 	while (!req->ki_ctx) {
696 		set_current_state(TASK_UNINTERRUPTIBLE);
697 		if (req->ki_ctx)
698 			break;
699 		io_schedule();
700 	}
701 	__set_current_state(TASK_RUNNING);
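	/*
	 * Pairs with the smp_wmb() in aio_complete(): ki_ctx is only set
	 * (to ERR_PTR(-EXDEV)) after ki_user_data has been written, so
	 * once we have seen ki_ctx change the result below is valid.
	 */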
702 	return req->ki_user_data;
703 }
704 EXPORT_SYMBOL(wait_on_sync_kiocb);
705 
706 /*
707  * exit_aio: called when the last user of mm goes away.  At this point, there is
708  * no way for any new requests to be submitted or any of the io_* syscalls to be
709  * called on the context.
710  *
711  * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
712  * them.
713  */
714 void exit_aio(struct mm_struct *mm)
715 {
716 	struct kioctx_table *table;
717 	struct kioctx *ctx;
718 	unsigned i = 0;
719 
720 	while (1) {
721 		rcu_read_lock();
722 		table = rcu_dereference(mm->ioctx_table);
723 
724 		do {
725 			if (!table || i >= table->nr) {
726 				rcu_read_unlock();
727 				rcu_assign_pointer(mm->ioctx_table, NULL);
728 				if (table)
729 					kfree(table);
730 				return;
731 			}
732 
733 			ctx = table->table[i++];
734 		} while (!ctx);
735 
736 		rcu_read_unlock();
737 
738 		/*
739 		 * We don't need to bother with munmap() here -
740 		 * exit_mmap(mm) is coming and it'll unmap everything.
741 		 * Since aio_free_ring() uses non-zero ->mmap_size
742 		 * as indicator that it needs to unmap the area,
743 		 * just set it to 0; aio_free_ring() is the only
744 		 * place that uses ->mmap_size, so it's safe.
745 		 */
746 		ctx->mmap_size = 0;
747 
748 		kill_ioctx(mm, ctx);
749 	}
750 }
751 
752 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
753 {
754 	struct kioctx_cpu *kcpu;
755 
756 	preempt_disable();
757 	kcpu = this_cpu_ptr(ctx->cpu);
758 
759 	kcpu->reqs_available += nr;
760 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
761 		kcpu->reqs_available -= ctx->req_batch;
762 		atomic_add(ctx->req_batch, &ctx->reqs_available);
763 	}
764 
765 	preempt_enable();
766 }
767 
768 static bool get_reqs_available(struct kioctx *ctx)
769 {
770 	struct kioctx_cpu *kcpu;
771 	bool ret = false;
772 
773 	preempt_disable();
774 	kcpu = this_cpu_ptr(ctx->cpu);
775 
776 	if (!kcpu->reqs_available) {
777 		int old, avail = atomic_read(&ctx->reqs_available);
778 
779 		do {
780 			if (avail < ctx->req_batch)
781 				goto out;
782 
783 			old = avail;
784 			avail = atomic_cmpxchg(&ctx->reqs_available,
785 					       avail, avail - ctx->req_batch);
786 		} while (avail != old);
787 
788 		kcpu->reqs_available += ctx->req_batch;
789 	}
790 
791 	ret = true;
792 	kcpu->reqs_available--;
793 out:
794 	preempt_enable();
795 	return ret;
796 }
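/*
 * Worked example of the batching above: a ring with 255 usable slots
 * starts reqs_available at 255; on a 4-CPU box req_batch =
 * 255 / 16 = 15.  get_reqs_available() refills a CPU with 15 slots at a
 * time, and put_reqs_available() only returns a batch once that CPU is
 * sitting on 2 * 15 = 30 spare slots, so most allocate/free traffic
 * never touches the shared atomic.
 */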
797 
798 /* aio_get_req
799  *	Allocate a slot for an aio request.
800  * Returns NULL if no requests are free.
801  */
802 static inline struct kiocb *aio_get_req(struct kioctx *ctx)
803 {
804 	struct kiocb *req;
805 
806 	if (!get_reqs_available(ctx))
807 		return NULL;
808 
809 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
810 	if (unlikely(!req))
811 		goto out_put;
812 
813 	req->ki_ctx = ctx;
814 	return req;
815 out_put:
816 	put_reqs_available(ctx, 1);
817 	return NULL;
818 }
819 
820 static void kiocb_free(struct kiocb *req)
821 {
822 	if (req->ki_filp)
823 		fput(req->ki_filp);
824 	if (req->ki_eventfd != NULL)
825 		eventfd_ctx_put(req->ki_eventfd);
826 	kmem_cache_free(kiocb_cachep, req);
827 }
828 
829 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
830 {
831 	struct aio_ring __user *ring  = (void __user *)ctx_id;
832 	struct mm_struct *mm = current->mm;
833 	struct kioctx *ctx, *ret = NULL;
834 	struct kioctx_table *table;
835 	unsigned id;
836 
837 	if (get_user(id, &ring->id))
838 		return NULL;
839 
840 	rcu_read_lock();
841 	table = rcu_dereference(mm->ioctx_table);
842 
843 	if (!table || id >= table->nr)
844 		goto out;
845 
846 	ctx = table->table[id];
847 	if (ctx && ctx->user_id == ctx_id) {
848 		percpu_ref_get(&ctx->users);
849 		ret = ctx;
850 	}
851 out:
852 	rcu_read_unlock();
853 	return ret;
854 }
855 
856 /* aio_complete
857  *	Called when the io request on the given iocb is complete.
858  */
859 void aio_complete(struct kiocb *iocb, long res, long res2)
860 {
861 	struct kioctx	*ctx = iocb->ki_ctx;
862 	struct aio_ring	*ring;
863 	struct io_event	*ev_page, *event;
864 	unsigned long	flags;
865 	unsigned tail, pos;
866 
867 	/*
868 	 * Special case handling for sync iocbs:
869 	 *  - events go directly into the iocb for fast handling
870 	 *  - the sync task with the iocb in its stack holds the single iocb
871 	 *    ref, no other paths have a way to get another ref
872 	 *  - the sync task helpfully left a reference to itself in the iocb
873 	 */
874 	if (is_sync_kiocb(iocb)) {
875 		iocb->ki_user_data = res;
876 		smp_wmb();
877 		iocb->ki_ctx = ERR_PTR(-EXDEV);
878 		wake_up_process(iocb->ki_obj.tsk);
879 		return;
880 	}
881 
882 	/*
883 	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
884 	 * need to issue a wakeup after incrementing reqs_available.
885 	 */
886 	rcu_read_lock();
887 
888 	if (iocb->ki_list.next) {
889 		unsigned long flags;
890 
891 		spin_lock_irqsave(&ctx->ctx_lock, flags);
892 		list_del(&iocb->ki_list);
893 		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
894 	}
895 
896 	/*
897 	 * Add a completion event to the ring buffer. Must be done holding
898 	 * ctx->completion_lock to prevent other code from messing with the tail
899 	 * pointer since we might be called from irq context.
900 	 */
901 	spin_lock_irqsave(&ctx->completion_lock, flags);
902 
903 	tail = ctx->tail;
904 	pos = tail + AIO_EVENTS_OFFSET;
905 
906 	if (++tail >= ctx->nr_events)
907 		tail = 0;
908 
909 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
910 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
911 
912 	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
913 	event->data = iocb->ki_user_data;
914 	event->res = res;
915 	event->res2 = res2;
916 
917 	kunmap_atomic(ev_page);
918 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
919 
920 	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
921 		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
922 		 res, res2);
923 
924 	/* after flagging the request as done, we
925 	 * must never even look at it again
926 	 */
927 	smp_wmb();	/* make event visible before updating tail */
928 
929 	ctx->tail = tail;
930 
931 	ring = kmap_atomic(ctx->ring_pages[0]);
932 	ring->tail = tail;
933 	kunmap_atomic(ring);
934 	flush_dcache_page(ctx->ring_pages[0]);
935 
936 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
937 
938 	pr_debug("added to ring %p at [%u]\n", iocb, tail);
939 
940 	/*
941 	 * Check if the user asked us to deliver the result through an
942 	 * eventfd. The eventfd_signal() function is safe to be called
943 	 * from IRQ context.
944 	 */
945 	if (iocb->ki_eventfd != NULL)
946 		eventfd_signal(iocb->ki_eventfd, 1);
947 
948 	/* everything turned out well, dispose of the aiocb. */
949 	kiocb_free(iocb);
950 
951 	/*
952 	 * We have to order our ring_info tail store above and test
953 	 * of the wait list below outside the wait lock.  This is
954 	 * like in wake_up_bit() where clearing a bit has to be
955 	 * ordered with the unlocked test.
956 	 */
957 	smp_mb();
958 
959 	if (waitqueue_active(&ctx->wait))
960 		wake_up(&ctx->wait);
961 
962 	rcu_read_unlock();
963 }
964 EXPORT_SYMBOL(aio_complete);
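/*
 * The smp_wmb() before publishing the new tail above is the kernel half
 * of the contract with ring readers: a consumer (including userspace
 * mapping the ring directly, which AIO_RING_INCOMPAT_FEATURES == 0 is
 * meant to permit) must load ring->tail first, issue a read barrier,
 * and only then read the events themselves.
 */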
965 
966 /* aio_read_events
967  *	Pull events off the ioctx's event ring.  Returns the number of
968  *	events fetched.
969  */
970 static long aio_read_events_ring(struct kioctx *ctx,
971 				 struct io_event __user *event, long nr)
972 {
973 	struct aio_ring *ring;
974 	unsigned head, tail, pos;
975 	long ret = 0;
976 	int copy_ret;
977 
978 	mutex_lock(&ctx->ring_lock);
979 
980 	ring = kmap_atomic(ctx->ring_pages[0]);
981 	head = ring->head;
982 	tail = ring->tail;
983 	kunmap_atomic(ring);
984 
985 	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
986 
987 	if (head == tail)
988 		goto out;
989 
990 	while (ret < nr) {
991 		long avail;
992 		struct io_event *ev;
993 		struct page *page;
994 
995 		avail = (head <= tail ?  tail : ctx->nr_events) - head;
996 		if (head == tail)
997 			break;
998 
999 		avail = min(avail, nr - ret);
1000 		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
1001 			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));
1002 
1003 		pos = head + AIO_EVENTS_OFFSET;
1004 		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1005 		pos %= AIO_EVENTS_PER_PAGE;
1006 
1007 		ev = kmap(page);
1008 		copy_ret = copy_to_user(event + ret, ev + pos,
1009 					sizeof(*ev) * avail);
1010 		kunmap(page);
1011 
1012 		if (unlikely(copy_ret)) {
1013 			ret = -EFAULT;
1014 			goto out;
1015 		}
1016 
1017 		ret += avail;
1018 		head += avail;
1019 		head %= ctx->nr_events;
1020 	}
1021 
1022 	ring = kmap_atomic(ctx->ring_pages[0]);
1023 	ring->head = head;
1024 	kunmap_atomic(ring);
1025 	flush_dcache_page(ctx->ring_pages[0]);
1026 
1027 	pr_debug("%li  h%u t%u\n", ret, head, tail);
1028 
1029 	put_reqs_available(ctx, ret);
1030 out:
1031 	mutex_unlock(&ctx->ring_lock);
1032 
1033 	return ret;
1034 }
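/*
 * Wraparound example for the avail computation above: with
 * nr_events = 255, head = 250 and tail = 5 there are 10 events pending.
 * The first pass sees head > tail, so avail = 255 - 250 and the 5
 * events up to the end of the ring are copied; head then wraps to 0 and
 * the next pass copies the remaining 5 (tail - head).
 */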
1035 
1036 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1037 			    struct io_event __user *event, long *i)
1038 {
1039 	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1040 
1041 	if (ret > 0)
1042 		*i += ret;
1043 
1044 	if (unlikely(atomic_read(&ctx->dead)))
1045 		ret = -EINVAL;
1046 
1047 	if (!*i)
1048 		*i = ret;
1049 
1050 	return ret < 0 || *i >= min_nr;
1051 }
1052 
1053 static long read_events(struct kioctx *ctx, long min_nr, long nr,
1054 			struct io_event __user *event,
1055 			struct timespec __user *timeout)
1056 {
1057 	ktime_t until = { .tv64 = KTIME_MAX };
1058 	long ret = 0;
1059 
1060 	if (timeout) {
1061 		struct timespec	ts;
1062 
1063 		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
1064 			return -EFAULT;
1065 
1066 		until = timespec_to_ktime(ts);
1067 	}
1068 
1069 	/*
1070 	 * Note that aio_read_events() is being called as the conditional - i.e.
1071 	 * we're calling it after prepare_to_wait() has set task state to
1072 	 * TASK_INTERRUPTIBLE.
1073 	 *
1074 	 * But aio_read_events() can block, and if it blocks it's going to flip
1075 	 * the task state back to TASK_RUNNING.
1076 	 *
1077 	 * This should be ok, provided it doesn't flip the state back to
1078 	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1079 	 * will only happen if the mutex_lock() call blocks, and we then find
1080 	 * the ringbuffer empty. So in practice we should be ok, but it's
1081 	 * something to be aware of when touching this code.
1082 	 */
1083 	wait_event_interruptible_hrtimeout(ctx->wait,
1084 			aio_read_events(ctx, min_nr, nr, event, &ret), until);
1085 
1086 	if (!ret && signal_pending(current))
1087 		ret = -EINTR;
1088 
1089 	return ret;
1090 }
1091 
1092 /* sys_io_setup:
1093  *	Create an aio_context capable of receiving at least nr_events.
1094  *	ctxp must not point to an aio_context that already exists, and
1095  *	must be initialized to 0 prior to the call.  On successful
1096  *	creation of the aio_context, *ctxp is filled in with the resulting
1097  *	handle.  May fail with -EINVAL if *ctxp is not initialized,
1098  *	or if the specified nr_events exceeds internal limits.  May fail
1099  *	with -EAGAIN if the specified nr_events exceeds the user's limit
1100  *	of available events.  May fail with -ENOMEM if insufficient kernel
1101  *	resources are available.  May fail with -EFAULT if an invalid
1102  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
1103  *	implemented.
1104  */
1105 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1106 {
1107 	struct kioctx *ioctx = NULL;
1108 	unsigned long ctx;
1109 	long ret;
1110 
1111 	ret = get_user(ctx, ctxp);
1112 	if (unlikely(ret))
1113 		goto out;
1114 
1115 	ret = -EINVAL;
1116 	if (unlikely(ctx || nr_events == 0)) {
1117 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
1118 		         ctx, nr_events);
1119 		goto out;
1120 	}
1121 
1122 	ioctx = ioctx_alloc(nr_events);
1123 	ret = PTR_ERR(ioctx);
1124 	if (!IS_ERR(ioctx)) {
1125 		ret = put_user(ioctx->user_id, ctxp);
1126 		if (ret)
1127 			kill_ioctx(current->mm, ioctx);
1128 		percpu_ref_put(&ioctx->users);
1129 	}
1130 
1131 out:
1132 	return ret;
1133 }
1134 
1135 /* sys_io_destroy:
1136  *	Destroy the aio_context specified.  May cancel any outstanding
1137  *	AIOs and block on completion.  Will fail with -ENOSYS if not
1138  *	implemented.  May fail with -EINVAL if the context pointed to
1139  *	is invalid.
1140  */
1141 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1142 {
1143 	struct kioctx *ioctx = lookup_ioctx(ctx);
1144 	if (likely(NULL != ioctx)) {
1145 		kill_ioctx(current->mm, ioctx);
1146 		percpu_ref_put(&ioctx->users);
1147 		return 0;
1148 	}
1149 	pr_debug("EINVAL: io_destroy: invalid context id\n");
1150 	return -EINVAL;
1151 }
1152 
1153 typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
1154 			    unsigned long, loff_t);
1155 
1156 static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
1157 				     int rw, char __user *buf,
1158 				     unsigned long *nr_segs,
1159 				     struct iovec **iovec,
1160 				     bool compat)
1161 {
1162 	ssize_t ret;
1163 
1164 	*nr_segs = kiocb->ki_nbytes;
1165 
1166 #ifdef CONFIG_COMPAT
1167 	if (compat)
1168 		ret = compat_rw_copy_check_uvector(rw,
1169 				(struct compat_iovec __user *)buf,
1170 				*nr_segs, 1, *iovec, iovec);
1171 	else
1172 #endif
1173 		ret = rw_copy_check_uvector(rw,
1174 				(struct iovec __user *)buf,
1175 				*nr_segs, 1, *iovec, iovec);
1176 	if (ret < 0)
1177 		return ret;
1178 
1179 	/* ki_nbytes now reflects bytes instead of segs */
1180 	kiocb->ki_nbytes = ret;
1181 	return 0;
1182 }
1183 
1184 static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
1185 				       int rw, char __user *buf,
1186 				       unsigned long *nr_segs,
1187 				       struct iovec *iovec)
1188 {
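	/*
	 * rw is READ (0) or WRITE (1).  Servicing a read means writing
	 * into the user buffer, so verify with VERIFY_WRITE (== !READ);
	 * servicing a write only reads the buffer, hence VERIFY_READ.
	 */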
1189 	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
1190 		return -EFAULT;
1191 
1192 	iovec->iov_base = buf;
1193 	iovec->iov_len = kiocb->ki_nbytes;
1194 	*nr_segs = 1;
1195 	return 0;
1196 }
1197 
1198 /*
1199  * aio_run_iocb:
1200  *	Performs the initial checks and operation setup for the kiocb
1201  *	at the time of io submission, then dispatches it.
1202  */
1203 static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
1204 			    char __user *buf, bool compat)
1205 {
1206 	struct file *file = req->ki_filp;
1207 	ssize_t ret;
1208 	unsigned long nr_segs;
1209 	int rw;
1210 	fmode_t mode;
1211 	aio_rw_op *rw_op;
1212 	struct iovec inline_vec, *iovec = &inline_vec;
1213 
1214 	switch (opcode) {
1215 	case IOCB_CMD_PREAD:
1216 	case IOCB_CMD_PREADV:
1217 		mode	= FMODE_READ;
1218 		rw	= READ;
1219 		rw_op	= file->f_op->aio_read;
1220 		goto rw_common;
1221 
1222 	case IOCB_CMD_PWRITE:
1223 	case IOCB_CMD_PWRITEV:
1224 		mode	= FMODE_WRITE;
1225 		rw	= WRITE;
1226 		rw_op	= file->f_op->aio_write;
1227 		goto rw_common;
1228 rw_common:
1229 		if (unlikely(!(file->f_mode & mode)))
1230 			return -EBADF;
1231 
1232 		if (!rw_op)
1233 			return -EINVAL;
1234 
1235 		ret = (opcode == IOCB_CMD_PREADV ||
1236 		       opcode == IOCB_CMD_PWRITEV)
1237 			? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
1238 						&iovec, compat)
1239 			: aio_setup_single_vector(req, rw, buf, &nr_segs,
1240 						  iovec);
1241 		if (ret)
1242 			return ret;
1243 
1244 		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
1245 		if (ret < 0) {
1246 			if (iovec != &inline_vec)
1247 				kfree(iovec);
1248 			return ret;
1249 		}
1250 
1251 		req->ki_nbytes = ret;
1252 
1253 		/* XXX: move/kill - rw_verify_area()? */
1254 		/* This matches the pread()/pwrite() logic */
1255 		if (req->ki_pos < 0) {
1256 			ret = -EINVAL;
1257 			break;
1258 		}
1259 
1260 		if (rw == WRITE)
1261 			file_start_write(file);
1262 
1263 		ret = rw_op(req, iovec, nr_segs, req->ki_pos);
1264 
1265 		if (rw == WRITE)
1266 			file_end_write(file);
1267 		break;
1268 
1269 	case IOCB_CMD_FDSYNC:
1270 		if (!file->f_op->aio_fsync)
1271 			return -EINVAL;
1272 
1273 		ret = file->f_op->aio_fsync(req, 1);
1274 		break;
1275 
1276 	case IOCB_CMD_FSYNC:
1277 		if (!file->f_op->aio_fsync)
1278 			return -EINVAL;
1279 
1280 		ret = file->f_op->aio_fsync(req, 0);
1281 		break;
1282 
1283 	default:
1284 		pr_debug("EINVAL: no operation provided\n");
1285 		return -EINVAL;
1286 	}
1287 
1288 	if (iovec != &inline_vec)
1289 		kfree(iovec);
1290 
1291 	if (ret != -EIOCBQUEUED) {
1292 		/*
1293 		 * There's no easy way to restart the syscall since other AIO's
1294 		 * may be already running. Just fail this IO with EINTR.
1295 		 */
1296 		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
1297 			     ret == -ERESTARTNOHAND ||
1298 			     ret == -ERESTART_RESTARTBLOCK))
1299 			ret = -EINTR;
1300 		aio_complete(req, ret, 0);
1301 	}
1302 
1303 	return 0;
1304 }
1305 
1306 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1307 			 struct iocb *iocb, bool compat)
1308 {
1309 	struct kiocb *req;
1310 	ssize_t ret;
1311 
1312 	/* enforce forwards compatibility on users */
1313 	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
1314 		pr_debug("EINVAL: reserve field set\n");
1315 		return -EINVAL;
1316 	}
1317 
1318 	/* prevent overflows */
1319 	if (unlikely(
1320 	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
1321 	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
1322 	    ((ssize_t)iocb->aio_nbytes < 0)
1323 	   )) {
1324 		pr_debug("EINVAL: io_submit: overflow check\n");
1325 		return -EINVAL;
1326 	}
1327 
1328 	req = aio_get_req(ctx);
1329 	if (unlikely(!req))
1330 		return -EAGAIN;
1331 
1332 	req->ki_filp = fget(iocb->aio_fildes);
1333 	if (unlikely(!req->ki_filp)) {
1334 		ret = -EBADF;
1335 		goto out_put_req;
1336 	}
1337 
1338 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1339 		/*
1340 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1341 		 * instance of the file* now. The file descriptor must be
1342 		 * an eventfd() fd, and will be signaled for each completed
1343 		 * event using the eventfd_signal() function.
1344 		 */
1345 		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
1346 		if (IS_ERR(req->ki_eventfd)) {
1347 			ret = PTR_ERR(req->ki_eventfd);
1348 			req->ki_eventfd = NULL;
1349 			goto out_put_req;
1350 		}
1351 	}
1352 
1353 	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
1354 	if (unlikely(ret)) {
1355 		pr_debug("EFAULT: aio_key\n");
1356 		goto out_put_req;
1357 	}
1358 
1359 	req->ki_obj.user = user_iocb;
1360 	req->ki_user_data = iocb->aio_data;
1361 	req->ki_pos = iocb->aio_offset;
1362 	req->ki_nbytes = iocb->aio_nbytes;
1363 
1364 	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
1365 			   (char __user *)(unsigned long)iocb->aio_buf,
1366 			   compat);
1367 	if (ret)
1368 		goto out_put_req;
1369 
1370 	return 0;
1371 out_put_req:
1372 	put_reqs_available(ctx, 1);
1373 	kiocb_free(req);
1374 	return ret;
1375 }
1376 
1377 long do_io_submit(aio_context_t ctx_id, long nr,
1378 		  struct iocb __user *__user *iocbpp, bool compat)
1379 {
1380 	struct kioctx *ctx;
1381 	long ret = 0;
1382 	int i = 0;
1383 	struct blk_plug plug;
1384 
1385 	if (unlikely(nr < 0))
1386 		return -EINVAL;
1387 
1388 	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
1389 		nr = LONG_MAX/sizeof(*iocbpp);
1390 
1391 	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
1392 		return -EFAULT;
1393 
1394 	ctx = lookup_ioctx(ctx_id);
1395 	if (unlikely(!ctx)) {
1396 		pr_debug("EINVAL: invalid context id\n");
1397 		return -EINVAL;
1398 	}
1399 
1400 	blk_start_plug(&plug);
1401 
1402 	/*
1403 	 * AKPM: should this return a partial result if some of the IOs were
1404 	 * successfully submitted?
1405 	 */
1406 	for (i = 0; i < nr; i++) {
1407 		struct iocb __user *user_iocb;
1408 		struct iocb tmp;
1409 
1410 		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
1411 			ret = -EFAULT;
1412 			break;
1413 		}
1414 
1415 		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
1416 			ret = -EFAULT;
1417 			break;
1418 		}
1419 
1420 		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
1421 		if (ret)
1422 			break;
1423 	}
1424 	blk_finish_plug(&plug);
1425 
1426 	percpu_ref_put(&ctx->users);
1427 	return i ? i : ret;
1428 }
1429 
1430 /* sys_io_submit:
1431  *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
1432  *	the number of iocbs queued.  May return -EINVAL if the aio_context
1433  *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
1434  *	*iocbpp[0] is not properly initialized, or if the operation specified
1435  *	is invalid for the file descriptor in the iocb.  May fail with
1436  *	-EFAULT if any of the data structures point to invalid data.  May
1437  *	fail with -EBADF if the file descriptor specified in the first
1438  *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
1439  *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
1440  *	fail with -ENOSYS if not implemented.
1441  */
1442 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1443 		struct iocb __user * __user *, iocbpp)
1444 {
1445 	return do_io_submit(ctx_id, nr, iocbpp, 0);
1446 }
1447 
1448 /* lookup_kiocb
1449  *	Finds a given iocb for cancellation.
1450  */
1451 static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
1452 				  u32 key)
1453 {
1454 	struct list_head *pos;
1455 
1456 	assert_spin_locked(&ctx->ctx_lock);
1457 
1458 	if (key != KIOCB_KEY)
1459 		return NULL;
1460 
1461 	/* TODO: use a hash or array, this sucks. */
1462 	list_for_each(pos, &ctx->active_reqs) {
1463 		struct kiocb *kiocb = list_kiocb(pos);
1464 		if (kiocb->ki_obj.user == iocb)
1465 			return kiocb;
1466 	}
1467 	return NULL;
1468 }
1469 
1470 /* sys_io_cancel:
1471  *	Attempts to cancel an iocb previously passed to io_submit.  If
1472  *	the operation is successfully cancelled, the resulting event is
1473  *	copied into the memory pointed to by result without being placed
1474  *	into the completion queue and 0 is returned.  May fail with
1475  *	-EFAULT if any of the data structures pointed to are invalid.
1476  *	May fail with -EINVAL if aio_context specified by ctx_id is
1477  *	invalid.  May fail with -EAGAIN if the iocb specified was not
1478  *	cancelled.  Will fail with -ENOSYS if not implemented.
1479  */
1480 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1481 		struct io_event __user *, result)
1482 {
1483 	struct kioctx *ctx;
1484 	struct kiocb *kiocb;
1485 	u32 key;
1486 	int ret;
1487 
1488 	ret = get_user(key, &iocb->aio_key);
1489 	if (unlikely(ret))
1490 		return -EFAULT;
1491 
1492 	ctx = lookup_ioctx(ctx_id);
1493 	if (unlikely(!ctx))
1494 		return -EINVAL;
1495 
1496 	spin_lock_irq(&ctx->ctx_lock);
1497 
1498 	kiocb = lookup_kiocb(ctx, iocb, key);
1499 	if (kiocb)
1500 		ret = kiocb_cancel(ctx, kiocb);
1501 	else
1502 		ret = -EINVAL;
1503 
1504 	spin_unlock_irq(&ctx->ctx_lock);
1505 
1506 	if (!ret) {
1507 		/*
1508 		 * The result argument is no longer used - the io_event is
1509 		 * always delivered via the ring buffer. -EINPROGRESS indicates
1510 		 * cancellation is in progress:
1511 		 */
1512 		ret = -EINPROGRESS;
1513 	}
1514 
1515 	percpu_ref_put(&ctx->users);
1516 
1517 	return ret;
1518 }
1519 
1520 /* io_getevents:
1521  *	Attempts to read at least min_nr events and up to nr events from
1522  *	the completion queue for the aio_context specified by ctx_id. If
1523  *	it succeeds, the number of read events is returned. May fail with
1524  *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
1525  *	out of range, or if timeout is out of range.  May fail with -EFAULT
1526  *	if any of the memory specified is invalid.  May return 0 or
1527  *	< min_nr if the timeout specified by timeout has elapsed
1528  *	before sufficient events are available, where timeout == NULL
1529  *	specifies an infinite timeout. Note that the timeout pointed to by
1530  *	timeout is relative.  Will fail with -ENOSYS if not implemented.
1531  */
1532 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1533 		long, min_nr,
1534 		long, nr,
1535 		struct io_event __user *, events,
1536 		struct timespec __user *, timeout)
1537 {
1538 	struct kioctx *ioctx = lookup_ioctx(ctx_id);
1539 	long ret = -EINVAL;
1540 
1541 	if (likely(ioctx)) {
1542 		if (likely(min_nr <= nr && min_nr >= 0))
1543 			ret = read_events(ioctx, min_nr, nr, events, timeout);
1544 		percpu_ref_put(&ioctx->users);
1545 	}
1546 	return ret;
1547 }
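
/*
 * Minimal userspace sketch of the syscalls above (illustrative only, not
 * part of this file).  glibc does not wrap these, so it goes through
 * syscall(2); the file name is arbitrary:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	int main(void)
 *	{
 *		aio_context_t ctx = 0;
 *		struct iocb cb, *cbs[1] = { &cb };
 *		struct io_event ev;
 *		char buf[] = "hello\n";
 *		int fd = open("/tmp/aio-test", O_WRONLY | O_CREAT, 0600);
 *
 *		if (fd < 0 || syscall(__NR_io_setup, 128, &ctx))
 *			return 1;
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = fd;
 *		cb.aio_lio_opcode = IOCB_CMD_PWRITE;
 *		cb.aio_buf = (unsigned long)buf;
 *		cb.aio_nbytes = sizeof(buf) - 1;
 *		if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *			return 1;
 *		syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
 *		return syscall(__NR_io_destroy, ctx);
 *	}
 */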
1548