/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

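/*
 * These counters back the fs.aio-nr and fs.aio-max-nr sysctls (wired up in
 * kernel/sysctl.c in kernels of this era), so the limit can be inspected
 * and raised from userspace, e.g.:
 *
 *	# cat /proc/sys/fs/aio-nr
 *	# echo 1048576 > /proc/sys/fs/aio-max-nr
 */
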
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panicking on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
	BUG_ON(!aio_wq);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic().  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

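/*
 * A worked example of the index math above (a sketch; the numbers assume
 * 4096-byte pages, a 32-byte struct aio_ring header and 32-byte io_events,
 * as on common configurations):
 *
 *	AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
 *	AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *	AIO_EVENTS_OFFSET     = 128 - 127        = 1
 *
 * Event 0 then lands at pos 1, i.e. page 0, slot 1 (just past the header);
 * event 126 is page 0, slot 127; event 127 rolls over to page 1, slot 0.
 */
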
#define aio_ring_event(info, nr) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event) do {		\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while (0)

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	kmem_cache_free(kioctx_cachep, ctx);
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	unsigned nr_events = ctx->max_reqs;
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work_sync(&ctx->wq);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

static inline int try_get_ioctx(struct kioctx *kioctx)
{
	return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
		__put_ioctx(kioctx);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 2);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	dprintk("aio: error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

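/*
 * Sketch of how the sync-path callers use this (mirrors do_sync_read() in
 * fs/read_write.c of this era; details abbreviated, not a drop-in copy):
 *
 *	struct kiocb kiocb;
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&kiocb, filp);
 *	kiocb.ki_pos = *ppos;
 *	kiocb.ki_left = len;
 *	kiocb.ki_nbytes = len;
 *
 *	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&kiocb);
 *	*ppos = kiocb.ki_pos;
 */
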
/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	return req;
}

/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE	32L
struct kiocb_batch {
	struct list_head head;
	long count; /* number of requests left to allocate */
};

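/*
 * Intended call pattern (a sketch of what do_io_submit() below does, not
 * an additional API):
 *
 *	struct kiocb_batch batch;
 *	struct kiocb *req;
 *
 *	kiocb_batch_init(&batch, nr);		// nr = iocbs to submit
 *	for (i = 0; i < nr; i++) {
 *		req = aio_get_req(ctx, &batch);	// refills 32 at a time
 *		...
 *	}
 *	kiocb_batch_free(ctx, &batch);		// release unused kiocbs
 */
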
static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
}

static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
	struct kiocb *req, *n;

	if (list_empty(&batch->head))
		return;

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
		list_del(&req->ki_batch);
		list_del(&req->ki_list);
		kmem_cache_free(kiocb_cachep, req);
		ctx->reqs_active--;
	}
	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
	unsigned short allocated, to_alloc;
	long avail;
	bool called_fput = false;
	struct kiocb *req, *n;
	struct aio_ring *ring;

	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
	for (allocated = 0; allocated < to_alloc; allocated++) {
		req = __aio_get_req(ctx);
		if (!req)
			/* allocation failed, go with what we've got */
			break;
		list_add(&req->ki_batch, &batch->head);
	}

	if (allocated == 0)
		goto out;

retry:
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
	BUG_ON(avail < 0);
	if (avail == 0 && !called_fput) {
		/*
		 * Handle a potential starvation case.  It is possible that
		 * we hold the last reference on a struct file, causing us
		 * to delay the final fput to non-irq context.  In this case,
		 * ctx->reqs_active is artificially high.  Calling the fput
		 * routine here may free up a slot in the event completion
		 * ring, allowing this allocation to succeed.
		 */
		kunmap_atomic(ring);
		spin_unlock_irq(&ctx->ctx_lock);
		aio_fput_routine(NULL);
		called_fput = true;
		goto retry;
	}

	if (avail < allocated) {
		/* Trim back the number of requests. */
		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
			list_del(&req->ki_batch);
			kmem_cache_free(kiocb_cachep, req);
			if (--allocated <= avail)
				break;
		}
	}

	batch->count -= allocated;
	list_for_each_entry(req, &batch->head, ki_batch) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
	}

	kunmap_atomic(ring);
	spin_unlock_irq(&ctx->ctx_lock);

out:
	return allocated;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx,
					struct kiocb_batch *batch)
{
	struct kiocb *req;

	if (list_empty(&batch->head))
		if (kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
	list_del(&req->ki_batch);
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		rcu_read_lock();
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		/*
		 * at that point ctx might've been killed, but actual
		 * freeing is RCU'd
		 */
		spin_unlock_irq(&ctx->ctx_lock);
		rcu_read_unlock();

		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not final fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(!fput_atomic(req->ki_filp))) {
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		schedule_work(&fput_work);
	} else {
		req->ki_filp = NULL;
		really_put_req(ctx, req);
	}
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		/*
		 * RCU protects us against accessing freed memory but
		 * we have to be careful not to get a reference when the
		 * reference count already dropped to 0 (ctx->dead test
		 * is unreliable because of races).
		 */
		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx *ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_all_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list, and keep running them until the list
 *	stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already; no point using non-zero delay
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

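/*
 * Sketch of the retry protocol from a method's point of view (hypothetical
 * driver code, not an API defined in this file): the retry method makes
 * partial non-blocking progress, returns -EIOCBRETRY, and arranges for
 * kick_iocb() to be called when more work can be done, e.g. from a wait
 * queue callback or interrupt handler:
 *
 *	static ssize_t foo_aio_retry(struct kiocb *iocb)
 *	{
 *		if (!foo_data_ready(iocb->private))
 *			return -EIOCBRETRY;	// kick_iocb() re-runs us
 *		return foo_transfer(iocb);	// final byte count
 *	}
 *
 *	static void foo_irq_handler(struct foo_dev *dev)
 *	{
 *		kick_iocb(dev->pending_iocb);	// queues onto ctx->run_list
 *	}
 */
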
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;
	int		ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0]);

	tail = info->tail;
	event = aio_ring_event(info, tail);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event);
	kunmap_atomic(ring);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

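/*
 * Sketch of the other completion style (hypothetical driver code): a retry
 * method that hands the request to hardware returns -EIOCBQUEUED, and the
 * completion path later reports the result with aio_complete(), which is
 * safe from irq context since it takes ctx_lock with irqsave:
 *
 *	static ssize_t foo_aio_write(struct kiocb *iocb, ...)
 *	{
 *		foo_start_dma(iocb);
 *		return -EIOCBQUEUED;	// event delivered later
 *	}
 *
 *	static void foo_dma_done(struct kiocb *iocb, long nbytes)
 *	{
 *		aio_complete(iocb, nbytes, 0);	// posts the io_event
 *	}
 */
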
/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0]);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up_all(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret) {
			put_ioctx(ioctx);
			return 0;
		}
		io_destroy(ioctx);
	}

out:
	return ret;
}

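/*
 * Minimal userspace sketch of the setup/teardown pair (illustrative only;
 * it uses raw syscall(2) rather than libaio, and error handling is
 * abbreviated):
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;		// must be zeroed before io_setup
 *
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	...
 *	syscall(__NR_io_destroy, ctx);
 */
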
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec, 1);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec, 1);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	/*
	 * We could have raced with io_destroy() and are currently holding a
	 * reference to ctx which should be destroyed. We cannot submit IO
	 * since ctx gets freed as soon as io_submit() puts its reference.  The
	 * check here is reliable: io_destroy() sets ctx->dead before waiting
	 * for outstanding IO and the barrier between these two is realized by
	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
	 * increment ctx->reqs_active before checking for ctx->dead and the
	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
	 * don't see ctx->dead set here, io_destroy() waits for our IO to
	 * finish.
	 */
	if (ctx->dead) {
		spin_unlock_irq(&ctx->ctx_lock);
		ret = -EINVAL;
		goto out_put_req;
	}
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

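/*
 * Userspace sketch of submitting one read with this syscall (illustrative,
 * raw syscall(2) based, error handling abbreviated):
 *
 *	struct iocb cb = { 0 };
 *	struct iocb *cbs[1] = { &cb };
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	cb.aio_data = 0x42;	// echoed back in io_event.data
 *
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */
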
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
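
/*
 * Userspace sketch of harvesting completions for the submit example above
 * (illustrative only):
 *
 *	struct io_event events[8];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int n, i;
 *
 *	n = syscall(__NR_io_getevents, ctx, 1, 8, events, &ts);
 *	for (i = 0; i < n; i++)
 *		printf("data=%llx res=%lld\n",
 *		       (unsigned long long)events[i].data,
 *		       (long long)events[i].res);
 */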