xref: /openbmc/linux/fs/aio.c (revision 97da55fc)
/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
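
/*
 * Note: these counters are exported to userspace as the fs.aio-nr and
 * fs.aio-max-nr sysctls (wired up in kernel/sysctl.c), so the current
 * and maximum request counts can be inspected via e.g.
 * /proc/sys/fs/aio-max-nr.
 */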

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines and panics on
 *	failure, as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
	BUG_ON(!aio_wq);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		BUG_ON(ctx->mm != current->mm);
		vm_munmap(info->mmap_base, info->mmap_size);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size, populate;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
					PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, 0,
					&populate);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}
	if (populate)
		mm_populate(info->mmap_base, populate);

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);

	return 0;
}
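
/*
 * Worked example of the sizing above, assuming 4 KiB pages, a 32-byte
 * struct aio_ring header and 32-byte struct io_event:
 *
 *	nr_events = 128 + 2 = 130
 *	size      = 32 + 32 * 130 = 4192 bytes  ->  nr_pages = 2
 *	nr_events = (2 * 4096 - 32) / 32 = 255 usable events
 *
 * i.e. the ring is rounded up to whole pages and nr_events then grows
 * to whatever actually fits in them.
 */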


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic().  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event) do {		\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while (0)
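
/*
 * Example of the index math, under the same 4 KiB-page assumption:
 * AIO_EVENTS_PER_PAGE = 128, AIO_EVENTS_FIRST_PAGE = 127, so
 * AIO_EVENTS_OFFSET = 1.  Event 0 then maps to pos 1: page 1/128 = 0,
 * slot 1 % 128 = 1, i.e. the first 32-byte slot after the struct
 * aio_ring header; event 127 maps to the first slot of page 1, and
 * every later page holds exactly AIO_EVENTS_PER_PAGE events.
 */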

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	kmem_cache_free(kioctx_cachep, ctx);
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	unsigned nr_events = ctx->max_reqs;
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work_sync(&ctx->wq);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

static inline int try_get_ioctx(struct kioctx *kioctx)
{
	return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
		__put_ioctx(kioctx);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 2);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	dprintk("aio: error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ctx(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	struct io_event res;

	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}

	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		kill_ctx(ctx);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 * That way we get all munmap done to current->mm -
		 * all other callers have ctx->mm == current->mm.
		 */
		ctx->ring_info.mmap_size = 0;
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	return req;
}

/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE	32L
struct kiocb_batch {
	struct list_head head;
	long count; /* number of requests left to allocate */
};
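
/*
 * A sketch of the intended lifecycle (this is how do_io_submit() below
 * drives it):
 *
 *	struct kiocb_batch batch;
 *
 *	kiocb_batch_init(&batch, nr);
 *	for each iocb:
 *		req = aio_get_req(ctx, &batch);	 (refills in bulk as needed)
 *		... submit req ...
 *	kiocb_batch_free(ctx, &batch);		 (returns unused kiocbs)
 */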

static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
}

static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
	struct kiocb *req, *n;

	if (list_empty(&batch->head))
		return;

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
		list_del(&req->ki_batch);
		list_del(&req->ki_list);
		kmem_cache_free(kiocb_cachep, req);
		ctx->reqs_active--;
	}
	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
	unsigned short allocated, to_alloc;
	long avail;
	struct kiocb *req, *n;
	struct aio_ring *ring;

	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
	for (allocated = 0; allocated < to_alloc; allocated++) {
		req = __aio_get_req(ctx);
		if (!req)
			/* allocation failed, go with what we've got */
			break;
		list_add(&req->ki_batch, &batch->head);
	}

	if (allocated == 0)
		goto out;

	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
	BUG_ON(avail < 0);
	if (avail < allocated) {
		/* Trim back the number of requests. */
		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
			list_del(&req->ki_batch);
			kmem_cache_free(kiocb_cachep, req);
			if (--allocated <= avail)
				break;
		}
	}

	batch->count -= allocated;
	list_for_each_entry(req, &batch->head, ki_batch) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
	}

	kunmap_atomic(ring);
	spin_unlock_irq(&ctx->ctx_lock);

out:
	return allocated;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx,
					struct kiocb_batch *batch)
{
	struct kiocb *req;

	if (list_empty(&batch->head))
		if (kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
	list_del(&req->ki_batch);
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	fput(req->ki_filp);
	req->ki_filp = NULL;
	really_put_req(ctx, req);
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		/*
		 * RCU protects us against accessing freed memory but
		 * we have to be careful not to get a reference when the
		 * reference count already dropped to 0 (ctx->dead test
		 * is unreliable because of races).
		 */
		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;       /* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx *ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_all_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list, and keep running them until the list
 *	stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already; no point using non-zero delay
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *      Called typically from a wait queue callback context
 *      to trigger a retry of the iocb.
 *      The retry is usually executed by aio workqueue
 *      threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);
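
/*
 * The retry flow in one place: a wait-queue callback (or other
 * completion hook) calls kick_iocb(), which marks the iocb kicked and,
 * via try_queue_kicked_iocb(), puts it on ctx->run_list and schedules
 * aio_kick_handler() on aio_wq.  The handler borrows the submitter's
 * mm and runs __aio_run_iocbs(), which calls aio_run_iocb() ->
 * ki_retry() for each queued iocb until it completes or asks to be
 * retried again.
 */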

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;
	int		ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0]);

	tail = info->tail;
	event = aio_ring_event(info, tail);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event);
	kunmap_atomic(ring);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0]);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}
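
/*
 * Ring protocol in short: aio_complete() advances ring->tail under
 * ctx_lock, aio_read_evt() advances ring->head under ring_lock, and
 * the ring is empty exactly when head == tail.  E.g. with nr == 255,
 * head == 254 and tail == 3 means events 254, 0, 1 and 2 are pending.
 */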

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;
	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 *  in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	kill_ctx(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up_all(&ioctx->wait);
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			io_destroy(ioctx);
		put_ioctx(ioctx);
	}

out:
	return ret;
}
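
/*
 * Minimal userspace sketch of this syscall family (raw syscalls, no
 * libaio; "fd" and "buf" are assumed to be set up elsewhere and all
 * error handling is omitted):
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	syscall(__NR_io_setup, 128, &ctx);
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	syscall(__NR_io_submit, ctx, 1, cbs);
 *	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
 *	syscall(__NR_io_destroy, ctx);
 */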

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		put_ioctx(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}
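
/*
 * Example: with two 100-byte iovecs and ret == 150, the loop above
 * consumes all of iovec 0 (advancing ki_cur_seg to 1) and the first 50
 * bytes of iovec 1, leaving iov_base += 50, iov_len == 50 and ki_left
 * reduced by 150, so the next retry resumes exactly where this
 * transfer stopped.
 */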

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(int type, struct file *file, struct kiocb *kiocb)
{
	int bytes;

	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
	if (bytes < 0)
		return bytes;

	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = bytes;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(READ, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(WRITE, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	/*
	 * We could have raced with io_destroy() and are currently holding a
	 * reference to ctx which should be destroyed. We cannot submit IO
	 * since ctx gets freed as soon as io_submit() puts its reference.  The
	 * check here is reliable: io_destroy() sets ctx->dead before waiting
	 * for outstanding IO and the barrier between these two is realized by
	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
	 * increment ctx->reqs_active before checking for ctx->dead and the
	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
	 * don't see ctx->dead set here, io_destroy() waits for our IO to
	 * finish.
	 */
	if (ctx->dead) {
		spin_unlock_irq(&ctx->ctx_lock);
		ret = -EINVAL;
		goto out_put_req;
	}
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
1797