xref: /openbmc/linux/fs/aio.c (revision ab37d5a43162ab424e36be03684881df438378a7)
1  /*
2   *	An async IO implementation for Linux
3   *	Written by Benjamin LaHaise <bcrl@kvack.org>
4   *
5   *	Implements an efficient asynchronous io interface.
6   *
7   *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
8   *	Copyright 2018 Christoph Hellwig.
9   *
10   *	See ../COPYING for licensing terms.
11   */
12  #define pr_fmt(fmt) "%s: " fmt, __func__
13  
14  #include <linux/kernel.h>
15  #include <linux/init.h>
16  #include <linux/errno.h>
17  #include <linux/time.h>
18  #include <linux/aio_abi.h>
19  #include <linux/export.h>
20  #include <linux/syscalls.h>
21  #include <linux/backing-dev.h>
22  #include <linux/refcount.h>
23  #include <linux/uio.h>
24  
25  #include <linux/sched/signal.h>
26  #include <linux/fs.h>
27  #include <linux/file.h>
28  #include <linux/mm.h>
29  #include <linux/mman.h>
30  #include <linux/percpu.h>
31  #include <linux/slab.h>
32  #include <linux/timer.h>
33  #include <linux/aio.h>
34  #include <linux/highmem.h>
35  #include <linux/workqueue.h>
36  #include <linux/security.h>
37  #include <linux/eventfd.h>
38  #include <linux/blkdev.h>
39  #include <linux/compat.h>
40  #include <linux/migrate.h>
41  #include <linux/ramfs.h>
42  #include <linux/percpu-refcount.h>
43  #include <linux/mount.h>
44  #include <linux/pseudo_fs.h>
45  
46  #include <linux/uaccess.h>
47  #include <linux/nospec.h>
48  
49  #include "internal.h"
50  
51  #define KIOCB_KEY		0
52  
53  #define AIO_RING_MAGIC			0xa10a10a1
54  #define AIO_RING_COMPAT_FEATURES	1
55  #define AIO_RING_INCOMPAT_FEATURES	0
56  struct aio_ring {
57  	unsigned	id;	/* kernel internal index number */
58  	unsigned	nr;	/* number of io_events */
59  	unsigned	head;	/* Written to by userland or under ring_lock
60  				 * mutex by aio_read_events_ring(). */
61  	unsigned	tail;
62  
63  	unsigned	magic;
64  	unsigned	compat_features;
65  	unsigned	incompat_features;
66  	unsigned	header_length;	/* size of aio_ring */
67  
68  
69  	struct io_event		io_events[];
70  }; /* 128 bytes + ring size */
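
/*
 * Illustrative sketch (not from the original source): the ring above is what
 * io_setup() maps into userspace, and the aio_context_t handle is simply the
 * address of that mapping.  A user-space consumer can therefore pull
 * completions without entering the kernel, roughly like this (single
 * consumer, memory barriers elided, names hypothetical; real code also
 * checks ring->magic and ring->incompat_features first):
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	unsigned head = ring->head;
 *
 *	while (head != ring->tail) {
 *		consume(&ring->io_events[head]);
 *		head = (head + 1) % ring->nr;
 *	}
 *	ring->head = head;	(hand the consumed slots back to the kernel)
 *
 * This is why "head" is documented as written to by userland, and why the
 * kernel clamps it before trusting it (see refill_reqs_available()).
 */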
71  
72  /*
73   * Plugging is meant to work with larger batches of IOs. If we don't
74   * have more than the below, then don't bother setting up a plug.
75   */
76  #define AIO_PLUG_THRESHOLD	2
77  
78  #define AIO_RING_PAGES	8
79  
80  struct kioctx_table {
81  	struct rcu_head		rcu;
82  	unsigned		nr;
83  	struct kioctx __rcu	*table[];
84  };
85  
86  struct kioctx_cpu {
87  	unsigned		reqs_available;
88  };
89  
90  struct ctx_rq_wait {
91  	struct completion comp;
92  	atomic_t count;
93  };
94  
95  struct kioctx {
96  	struct percpu_ref	users;
97  	atomic_t		dead;
98  
99  	struct percpu_ref	reqs;
100  
101  	unsigned long		user_id;
102  
103  	struct __percpu kioctx_cpu *cpu;
104  
105  	/*
106  	 * For percpu reqs_available, number of slots we move to/from global
107  	 * counter at a time:
108  	 */
109  	unsigned		req_batch;
110  	/*
111  	 * This is what userspace passed to io_setup(); it's not used for
112  	 * anything but counting against the global max_reqs quota.
113  	 *
114  	 * The real limit is nr_events - 1, which will be larger (see
115  	 * aio_setup_ring())
116  	 */
117  	unsigned		max_reqs;
118  
119  	/* Size of ringbuffer, in units of struct io_event */
120  	unsigned		nr_events;
121  
122  	unsigned long		mmap_base;
123  	unsigned long		mmap_size;
124  
125  	struct page		**ring_pages;
126  	long			nr_pages;
127  
128  	struct rcu_work		free_rwork;	/* see free_ioctx() */
129  
130  	/*
131  	 * signals when all in-flight requests are done
132  	 */
133  	struct ctx_rq_wait	*rq_wait;
134  
135  	struct {
136  		/*
137  		 * This counts the number of available slots in the ringbuffer,
138  		 * so we avoid overflowing it: it's decremented (if positive)
139  		 * when allocating a kiocb and incremented when the resulting
140  		 * io_event is pulled off the ringbuffer.
141  		 *
142  		 * We batch accesses to it with a percpu version.
143  		 */
144  		atomic_t	reqs_available;
145  	} ____cacheline_aligned_in_smp;
146  
147  	struct {
148  		spinlock_t	ctx_lock;
149  		struct list_head active_reqs;	/* used for cancellation */
150  	} ____cacheline_aligned_in_smp;
151  
152  	struct {
153  		struct mutex	ring_lock;
154  		wait_queue_head_t wait;
155  	} ____cacheline_aligned_in_smp;
156  
157  	struct {
158  		unsigned	tail;
159  		unsigned	completed_events;
160  		spinlock_t	completion_lock;
161  	} ____cacheline_aligned_in_smp;
162  
163  	struct page		*internal_pages[AIO_RING_PAGES];
164  	struct file		*aio_ring_file;
165  
166  	unsigned		id;
167  };
168  
169  /*
170   * First field must be the file pointer in all the
171   * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172   */
173  struct fsync_iocb {
174  	struct file		*file;
175  	struct work_struct	work;
176  	bool			datasync;
177  	struct cred		*creds;
178  };
179  
180  struct poll_iocb {
181  	struct file		*file;
182  	struct wait_queue_head	*head;
183  	__poll_t		events;
184  	bool			done;
185  	bool			cancelled;
186  	struct wait_queue_entry	wait;
187  	struct work_struct	work;
188  };
189  
190  /*
191   * NOTE! Each of the iocb union members has the file pointer
192   * as the first entry in their struct definition. So you can
193   * access the file pointer through any of the sub-structs,
194   * or directly as just 'ki_filp' in this struct.
195   */
196  struct aio_kiocb {
197  	union {
198  		struct file		*ki_filp;
199  		struct kiocb		rw;
200  		struct fsync_iocb	fsync;
201  		struct poll_iocb	poll;
202  	};
203  
204  	struct kioctx		*ki_ctx;
205  	kiocb_cancel_fn		*ki_cancel;
206  
207  	struct io_event		ki_res;
208  
209  	struct list_head	ki_list;	/* the aio core uses this
210  						 * for cancellation */
211  	refcount_t		ki_refcnt;
212  
213  	/*
214  	 * If the aio_resfd field of the userspace iocb is not zero,
215  	 * this is the underlying eventfd context to deliver events to.
216  	 */
217  	struct eventfd_ctx	*ki_eventfd;
218  };
219  
220  /*------ sysctl variables----*/
221  static DEFINE_SPINLOCK(aio_nr_lock);
222  unsigned long aio_nr;		/* current system wide number of aio requests */
223  unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
224  /*----end sysctl variables---*/
225  
226  static struct kmem_cache	*kiocb_cachep;
227  static struct kmem_cache	*kioctx_cachep;
228  
229  static struct vfsmount *aio_mnt;
230  
231  static const struct file_operations aio_ring_fops;
232  static const struct address_space_operations aio_ctx_aops;
233  
234  static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
235  {
236  	struct file *file;
237  	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
238  	if (IS_ERR(inode))
239  		return ERR_CAST(inode);
240  
241  	inode->i_mapping->a_ops = &aio_ctx_aops;
242  	inode->i_mapping->private_data = ctx;
243  	inode->i_size = PAGE_SIZE * nr_pages;
244  
245  	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
246  				O_RDWR, &aio_ring_fops);
247  	if (IS_ERR(file))
248  		iput(inode);
249  	return file;
250  }
251  
252  static int aio_init_fs_context(struct fs_context *fc)
253  {
254  	if (!init_pseudo(fc, AIO_RING_MAGIC))
255  		return -ENOMEM;
256  	fc->s_iflags |= SB_I_NOEXEC;
257  	return 0;
258  }
259  
260  /* aio_setup
261   *	Creates the slab caches used by the aio routines; panics on
262   *	failure as this is done early during the boot sequence.
263   */
264  static int __init aio_setup(void)
265  {
266  	static struct file_system_type aio_fs = {
267  		.name		= "aio",
268  		.init_fs_context = aio_init_fs_context,
269  		.kill_sb	= kill_anon_super,
270  	};
271  	aio_mnt = kern_mount(&aio_fs);
272  	if (IS_ERR(aio_mnt))
273  		panic("Failed to create aio fs mount.");
274  
275  	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
276  	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
277  	return 0;
278  }
279  __initcall(aio_setup);
280  
281  static void put_aio_ring_file(struct kioctx *ctx)
282  {
283  	struct file *aio_ring_file = ctx->aio_ring_file;
284  	struct address_space *i_mapping;
285  
286  	if (aio_ring_file) {
287  		truncate_setsize(file_inode(aio_ring_file), 0);
288  
289  		/* Prevent further access to the kioctx from migratepages */
290  		i_mapping = aio_ring_file->f_mapping;
291  		spin_lock(&i_mapping->private_lock);
292  		i_mapping->private_data = NULL;
293  		ctx->aio_ring_file = NULL;
294  		spin_unlock(&i_mapping->private_lock);
295  
296  		fput(aio_ring_file);
297  	}
298  }
299  
300  static void aio_free_ring(struct kioctx *ctx)
301  {
302  	int i;
303  
304  	/* Disconnect the kioctx from the ring file.  This prevents future
305  	 * accesses to the kioctx from page migration.
306  	 */
307  	put_aio_ring_file(ctx);
308  
309  	for (i = 0; i < ctx->nr_pages; i++) {
310  		struct page *page;
311  		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
312  				page_count(ctx->ring_pages[i]));
313  		page = ctx->ring_pages[i];
314  		if (!page)
315  			continue;
316  		ctx->ring_pages[i] = NULL;
317  		put_page(page);
318  	}
319  
320  	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
321  		kfree(ctx->ring_pages);
322  		ctx->ring_pages = NULL;
323  	}
324  }
325  
326  static int aio_ring_mremap(struct vm_area_struct *vma, unsigned long flags)
327  {
328  	struct file *file = vma->vm_file;
329  	struct mm_struct *mm = vma->vm_mm;
330  	struct kioctx_table *table;
331  	int i, res = -EINVAL;
332  
333  	if (flags & MREMAP_DONTUNMAP)
334  		return -EINVAL;
335  
336  	spin_lock(&mm->ioctx_lock);
337  	rcu_read_lock();
338  	table = rcu_dereference(mm->ioctx_table);
339  	for (i = 0; i < table->nr; i++) {
340  		struct kioctx *ctx;
341  
342  		ctx = rcu_dereference(table->table[i]);
343  		if (ctx && ctx->aio_ring_file == file) {
344  			if (!atomic_read(&ctx->dead)) {
345  				ctx->user_id = ctx->mmap_base = vma->vm_start;
346  				res = 0;
347  			}
348  			break;
349  		}
350  	}
351  
352  	rcu_read_unlock();
353  	spin_unlock(&mm->ioctx_lock);
354  	return res;
355  }
356  
357  static const struct vm_operations_struct aio_ring_vm_ops = {
358  	.mremap		= aio_ring_mremap,
359  #if IS_ENABLED(CONFIG_MMU)
360  	.fault		= filemap_fault,
361  	.map_pages	= filemap_map_pages,
362  	.page_mkwrite	= filemap_page_mkwrite,
363  #endif
364  };
365  
366  static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
367  {
368  	vma->vm_flags |= VM_DONTEXPAND;
369  	vma->vm_ops = &aio_ring_vm_ops;
370  	return 0;
371  }
372  
373  static const struct file_operations aio_ring_fops = {
374  	.mmap = aio_ring_mmap,
375  };
376  
377  #if IS_ENABLED(CONFIG_MIGRATION)
378  static int aio_migratepage(struct address_space *mapping, struct page *new,
379  			struct page *old, enum migrate_mode mode)
380  {
381  	struct kioctx *ctx;
382  	unsigned long flags;
383  	pgoff_t idx;
384  	int rc;
385  
386  	/*
387  	 * We cannot support the _NO_COPY case here, because copy needs to
388  	 * happen under the ctx->completion_lock. That does not work with the
389  	 * migration workflow of MIGRATE_SYNC_NO_COPY.
390  	 */
391  	if (mode == MIGRATE_SYNC_NO_COPY)
392  		return -EINVAL;
393  
394  	rc = 0;
395  
396  	/* mapping->private_lock here protects against the kioctx teardown.  */
397  	spin_lock(&mapping->private_lock);
398  	ctx = mapping->private_data;
399  	if (!ctx) {
400  		rc = -EINVAL;
401  		goto out;
402  	}
403  
404  	/* Take the ring_lock mutex.  This prevents aio_read_events() from
405  	 * writing to the ring's head, and prevents page migration from mucking
406  	 * in a partially initialized kioctx.
407  	 */
408  	if (!mutex_trylock(&ctx->ring_lock)) {
409  		rc = -EAGAIN;
410  		goto out;
411  	}
412  
413  	idx = old->index;
414  	if (idx < (pgoff_t)ctx->nr_pages) {
415  		/* Make sure the old page hasn't already been changed */
416  		if (ctx->ring_pages[idx] != old)
417  			rc = -EAGAIN;
418  	} else
419  		rc = -EINVAL;
420  
421  	if (rc != 0)
422  		goto out_unlock;
423  
424  	/* Writeback must be complete */
425  	BUG_ON(PageWriteback(old));
426  	get_page(new);
427  
428  	rc = migrate_page_move_mapping(mapping, new, old, 1);
429  	if (rc != MIGRATEPAGE_SUCCESS) {
430  		put_page(new);
431  		goto out_unlock;
432  	}
433  
434  	/* Take completion_lock to prevent other writes to the ring buffer
435  	 * while the old page is copied to the new.  This prevents new
436  	 * events from being lost.
437  	 */
438  	spin_lock_irqsave(&ctx->completion_lock, flags);
439  	migrate_page_copy(new, old);
440  	BUG_ON(ctx->ring_pages[idx] != old);
441  	ctx->ring_pages[idx] = new;
442  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
443  
444  	/* The old page is no longer accessible. */
445  	put_page(old);
446  
447  out_unlock:
448  	mutex_unlock(&ctx->ring_lock);
449  out:
450  	spin_unlock(&mapping->private_lock);
451  	return rc;
452  }
453  #endif
454  
455  static const struct address_space_operations aio_ctx_aops = {
456  	.set_page_dirty = __set_page_dirty_no_writeback,
457  #if IS_ENABLED(CONFIG_MIGRATION)
458  	.migratepage	= aio_migratepage,
459  #endif
460  };
461  
462  static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
463  {
464  	struct aio_ring *ring;
465  	struct mm_struct *mm = current->mm;
466  	unsigned long size, unused;
467  	int nr_pages;
468  	int i;
469  	struct file *file;
470  
471  	/* Compensate for the ring buffer's head/tail overlap entry */
472  	nr_events += 2;	/* 1 is required, 2 for good luck */
473  
474  	size = sizeof(struct aio_ring);
475  	size += sizeof(struct io_event) * nr_events;
476  
477  	nr_pages = PFN_UP(size);
478  	if (nr_pages < 0)
479  		return -EINVAL;
480  
481  	file = aio_private_file(ctx, nr_pages);
482  	if (IS_ERR(file)) {
483  		ctx->aio_ring_file = NULL;
484  		return -ENOMEM;
485  	}
486  
487  	ctx->aio_ring_file = file;
488  	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
489  			/ sizeof(struct io_event);
490  
491  	ctx->ring_pages = ctx->internal_pages;
492  	if (nr_pages > AIO_RING_PAGES) {
493  		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
494  					  GFP_KERNEL);
495  		if (!ctx->ring_pages) {
496  			put_aio_ring_file(ctx);
497  			return -ENOMEM;
498  		}
499  	}
500  
501  	for (i = 0; i < nr_pages; i++) {
502  		struct page *page;
503  		page = find_or_create_page(file->f_mapping,
504  					   i, GFP_HIGHUSER | __GFP_ZERO);
505  		if (!page)
506  			break;
507  		pr_debug("pid(%d) page[%d]->count=%d\n",
508  			 current->pid, i, page_count(page));
509  		SetPageUptodate(page);
510  		unlock_page(page);
511  
512  		ctx->ring_pages[i] = page;
513  	}
514  	ctx->nr_pages = i;
515  
516  	if (unlikely(i != nr_pages)) {
517  		aio_free_ring(ctx);
518  		return -ENOMEM;
519  	}
520  
521  	ctx->mmap_size = nr_pages * PAGE_SIZE;
522  	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
523  
524  	if (mmap_write_lock_killable(mm)) {
525  		ctx->mmap_size = 0;
526  		aio_free_ring(ctx);
527  		return -EINTR;
528  	}
529  
530  	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
531  				 PROT_READ | PROT_WRITE,
532  				 MAP_SHARED, 0, &unused, NULL);
533  	mmap_write_unlock(mm);
534  	if (IS_ERR((void *)ctx->mmap_base)) {
535  		ctx->mmap_size = 0;
536  		aio_free_ring(ctx);
537  		return -ENOMEM;
538  	}
539  
540  	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
541  
542  	ctx->user_id = ctx->mmap_base;
543  	ctx->nr_events = nr_events; /* trusted copy */
544  
545  	ring = kmap_atomic(ctx->ring_pages[0]);
546  	ring->nr = nr_events;	/* user copy */
547  	ring->id = ~0U;
548  	ring->head = ring->tail = 0;
549  	ring->magic = AIO_RING_MAGIC;
550  	ring->compat_features = AIO_RING_COMPAT_FEATURES;
551  	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
552  	ring->header_length = sizeof(struct aio_ring);
553  	kunmap_atomic(ring);
554  	flush_dcache_page(ctx->ring_pages[0]);
555  
556  	return 0;
557  }
558  
559  #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
560  #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
561  #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
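
/*
 * Worked example (illustrative): event slots are numbered from the start of
 * the io_events[] array, but the first ring page also carries the struct
 * aio_ring header, so a slot index is first biased by AIO_EVENTS_OFFSET and
 * then split into a page number and an offset within that page:
 *
 *	pos  = tail + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	slot = pos % AIO_EVENTS_PER_PAGE;
 *
 * This is exactly the arithmetic used by aio_complete() and
 * aio_read_events_ring() below.
 */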
562  
563  void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
564  {
565  	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
566  	struct kioctx *ctx = req->ki_ctx;
567  	unsigned long flags;
568  
569  	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
570  		return;
571  
572  	spin_lock_irqsave(&ctx->ctx_lock, flags);
573  	list_add_tail(&req->ki_list, &ctx->active_reqs);
574  	req->ki_cancel = cancel;
575  	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
576  }
577  EXPORT_SYMBOL(kiocb_set_cancel_fn);
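
/*
 * Usage sketch (illustrative, not taken from this file): a driver that
 * completes kiocbs asynchronously can make them cancellable through
 * io_cancel(2) by registering a callback once the request has been queued
 * internally, e.g.:
 *
 *	static int my_cancel(struct kiocb *kiocb)
 *	{
 *		... abort the queued operation backing this kiocb ...
 *		return 0;
 *	}
 *
 *	then, in the driver's ->read_iter()/->write_iter():
 *		kiocb_set_cancel_fn(kiocb, my_cancel);
 *		return -EIOCBQUEUED;
 *
 * The USB gadget functionfs code is one in-tree user of this interface;
 * my_cancel() above is purely hypothetical.
 */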
578  
579  /*
580   * free_ioctx() should be RCU delayed to synchronize against the RCU
581   * protected lookup_ioctx() and also needs process context to call
582   * aio_free_ring().  Use rcu_work.
583   */
584  static void free_ioctx(struct work_struct *work)
585  {
586  	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
587  					  free_rwork);
588  	pr_debug("freeing %p\n", ctx);
589  
590  	aio_free_ring(ctx);
591  	free_percpu(ctx->cpu);
592  	percpu_ref_exit(&ctx->reqs);
593  	percpu_ref_exit(&ctx->users);
594  	kmem_cache_free(kioctx_cachep, ctx);
595  }
596  
597  static void free_ioctx_reqs(struct percpu_ref *ref)
598  {
599  	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
600  
601  	/* At this point we know that there are no in-flight requests left */
602  	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
603  		complete(&ctx->rq_wait->comp);
604  
605  	/* Synchronize against RCU protected table->table[] dereferences */
606  	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
607  	queue_rcu_work(system_wq, &ctx->free_rwork);
608  }
609  
610  /*
611   * When this function runs, the kioctx has been removed from the "hash table"
612   * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
613   * now it's safe to cancel any that need to be.
614   */
615  static void free_ioctx_users(struct percpu_ref *ref)
616  {
617  	struct kioctx *ctx = container_of(ref, struct kioctx, users);
618  	struct aio_kiocb *req;
619  
620  	spin_lock_irq(&ctx->ctx_lock);
621  
622  	while (!list_empty(&ctx->active_reqs)) {
623  		req = list_first_entry(&ctx->active_reqs,
624  				       struct aio_kiocb, ki_list);
625  		req->ki_cancel(&req->rw);
626  		list_del_init(&req->ki_list);
627  	}
628  
629  	spin_unlock_irq(&ctx->ctx_lock);
630  
631  	percpu_ref_kill(&ctx->reqs);
632  	percpu_ref_put(&ctx->reqs);
633  }
634  
635  static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
636  {
637  	unsigned i, new_nr;
638  	struct kioctx_table *table, *old;
639  	struct aio_ring *ring;
640  
641  	spin_lock(&mm->ioctx_lock);
642  	table = rcu_dereference_raw(mm->ioctx_table);
643  
644  	while (1) {
645  		if (table)
646  			for (i = 0; i < table->nr; i++)
647  				if (!rcu_access_pointer(table->table[i])) {
648  					ctx->id = i;
649  					rcu_assign_pointer(table->table[i], ctx);
650  					spin_unlock(&mm->ioctx_lock);
651  
652  					/* While kioctx setup is in progress,
653  					 * we are protected by ->ring_lock from
654  					 * page migration changing ring_pages.
655  					 */
656  					ring = kmap_atomic(ctx->ring_pages[0]);
657  					ring->id = ctx->id;
658  					kunmap_atomic(ring);
659  					return 0;
660  				}
661  
662  		new_nr = (table ? table->nr : 1) * 4;
663  		spin_unlock(&mm->ioctx_lock);
664  
665  		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
666  				new_nr, GFP_KERNEL);
667  		if (!table)
668  			return -ENOMEM;
669  
670  		table->nr = new_nr;
671  
672  		spin_lock(&mm->ioctx_lock);
673  		old = rcu_dereference_raw(mm->ioctx_table);
674  
675  		if (!old) {
676  			rcu_assign_pointer(mm->ioctx_table, table);
677  		} else if (table->nr > old->nr) {
678  			memcpy(table->table, old->table,
679  			       old->nr * sizeof(struct kioctx *));
680  
681  			rcu_assign_pointer(mm->ioctx_table, table);
682  			kfree_rcu(old, rcu);
683  		} else {
684  			kfree(table);
685  			table = old;
686  		}
687  	}
688  }
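
/*
 * Illustrative note: the per-mm kioctx table starts out with 4 slots on
 * first use (new_nr = (table ? table->nr : 1) * 4 above) and is quadrupled
 * each time it fills up.  Existing entries keep their index, so the slot
 * number stored in ctx->id, and echoed back to userspace through ring->id,
 * stays valid for the lifetime of the context.
 */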
689  
690  static void aio_nr_sub(unsigned nr)
691  {
692  	spin_lock(&aio_nr_lock);
693  	if (WARN_ON(aio_nr - nr > aio_nr))
694  		aio_nr = 0;
695  	else
696  		aio_nr -= nr;
697  	spin_unlock(&aio_nr_lock);
698  }
699  
700  /* ioctx_alloc
701   *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
702   */
703  static struct kioctx *ioctx_alloc(unsigned nr_events)
704  {
705  	struct mm_struct *mm = current->mm;
706  	struct kioctx *ctx;
707  	int err = -ENOMEM;
708  
709  	/*
710  	 * Store the original nr_events -- what userspace passed to io_setup(),
711  	 * for counting against the global limit -- before it changes.
712  	 */
713  	unsigned int max_reqs = nr_events;
714  
715  	/*
716  	 * We keep track of the number of available ringbuffer slots, to prevent
717  	 * overflow (reqs_available), and we also use percpu counters for this.
718  	 *
719  	 * Since up to half the slots might be on other cpus' percpu counters
720  	 * and thus unavailable, double nr_events so userspace sees what it
721  	 * expected.  Additionally, we move req_batch slots to/from the percpu
722  	 * counters at a time, so make sure that isn't 0:
723  	 */
724  	nr_events = max(nr_events, num_possible_cpus() * 4);
725  	nr_events *= 2;
726  
727  	/* Prevent overflows */
728  	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
729  		pr_debug("EINVAL: nr_events too high\n");
730  		return ERR_PTR(-EINVAL);
731  	}
732  
733  	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
734  		return ERR_PTR(-EAGAIN);
735  
736  	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
737  	if (!ctx)
738  		return ERR_PTR(-ENOMEM);
739  
740  	ctx->max_reqs = max_reqs;
741  
742  	spin_lock_init(&ctx->ctx_lock);
743  	spin_lock_init(&ctx->completion_lock);
744  	mutex_init(&ctx->ring_lock);
745  	/* Protect against page migration throughout kioctx setup by keeping
746  	 * the ring_lock mutex held until setup is complete. */
747  	mutex_lock(&ctx->ring_lock);
748  	init_waitqueue_head(&ctx->wait);
749  
750  	INIT_LIST_HEAD(&ctx->active_reqs);
751  
752  	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
753  		goto err;
754  
755  	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
756  		goto err;
757  
758  	ctx->cpu = alloc_percpu(struct kioctx_cpu);
759  	if (!ctx->cpu)
760  		goto err;
761  
762  	err = aio_setup_ring(ctx, nr_events);
763  	if (err < 0)
764  		goto err;
765  
766  	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
767  	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
768  	if (ctx->req_batch < 1)
769  		ctx->req_batch = 1;
770  
771  	/* limit the number of system wide aios */
772  	spin_lock(&aio_nr_lock);
773  	if (aio_nr + ctx->max_reqs > aio_max_nr ||
774  	    aio_nr + ctx->max_reqs < aio_nr) {
775  		spin_unlock(&aio_nr_lock);
776  		err = -EAGAIN;
777  		goto err_ctx;
778  	}
779  	aio_nr += ctx->max_reqs;
780  	spin_unlock(&aio_nr_lock);
781  
782  	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
783  	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
784  
785  	err = ioctx_add_table(ctx, mm);
786  	if (err)
787  		goto err_cleanup;
788  
789  	/* Release the ring_lock mutex now that all setup is complete. */
790  	mutex_unlock(&ctx->ring_lock);
791  
792  	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
793  		 ctx, ctx->user_id, mm, ctx->nr_events);
794  	return ctx;
795  
796  err_cleanup:
797  	aio_nr_sub(ctx->max_reqs);
798  err_ctx:
799  	atomic_set(&ctx->dead, 1);
800  	if (ctx->mmap_size)
801  		vm_munmap(ctx->mmap_base, ctx->mmap_size);
802  	aio_free_ring(ctx);
803  err:
804  	mutex_unlock(&ctx->ring_lock);
805  	free_percpu(ctx->cpu);
806  	percpu_ref_exit(&ctx->reqs);
807  	percpu_ref_exit(&ctx->users);
808  	kmem_cache_free(kioctx_cachep, ctx);
809  	pr_debug("error allocating ioctx %d\n", err);
810  	return ERR_PTR(err);
811  }
812  
813  /* kill_ioctx
814   *	Cancels all outstanding aio requests on an aio context.  Used
815   *	when the processes owning a context have all exited to encourage
816   *	the rapid destruction of the kioctx.
817   */
818  static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
819  		      struct ctx_rq_wait *wait)
820  {
821  	struct kioctx_table *table;
822  
823  	spin_lock(&mm->ioctx_lock);
824  	if (atomic_xchg(&ctx->dead, 1)) {
825  		spin_unlock(&mm->ioctx_lock);
826  		return -EINVAL;
827  	}
828  
829  	table = rcu_dereference_raw(mm->ioctx_table);
830  	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
831  	RCU_INIT_POINTER(table->table[ctx->id], NULL);
832  	spin_unlock(&mm->ioctx_lock);
833  
834  	/* free_ioctx_reqs() will do the necessary RCU synchronization */
835  	wake_up_all(&ctx->wait);
836  
837  	/*
838  	 * It'd be more correct to do this in free_ioctx(), after all
839  	 * the outstanding kiocbs have finished - but by then io_destroy
840  	 * has already returned, so io_setup() could potentially return
841  	 * -EAGAIN with no ioctxs actually in use (as far as userspace
842  	 *  could tell).
843  	 */
844  	aio_nr_sub(ctx->max_reqs);
845  
846  	if (ctx->mmap_size)
847  		vm_munmap(ctx->mmap_base, ctx->mmap_size);
848  
849  	ctx->rq_wait = wait;
850  	percpu_ref_kill(&ctx->users);
851  	return 0;
852  }
853  
854  /*
855   * exit_aio: called when the last user of mm goes away.  At this point, there is
856   * no way for any new requests to be submitted or any of the io_* syscalls to be
857   * called on the context.
858   *
859   * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
860   * them.
861   */
862  void exit_aio(struct mm_struct *mm)
863  {
864  	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
865  	struct ctx_rq_wait wait;
866  	int i, skipped;
867  
868  	if (!table)
869  		return;
870  
871  	atomic_set(&wait.count, table->nr);
872  	init_completion(&wait.comp);
873  
874  	skipped = 0;
875  	for (i = 0; i < table->nr; ++i) {
876  		struct kioctx *ctx =
877  			rcu_dereference_protected(table->table[i], true);
878  
879  		if (!ctx) {
880  			skipped++;
881  			continue;
882  		}
883  
884  		/*
885  		 * We don't need to bother with munmap() here - exit_mmap(mm)
886  		 * is coming and it'll unmap everything. And we simply can't:
887  		 * this is not necessarily our ->mm.
888  		 * Since kill_ioctx() uses non-zero ->mmap_size as an indicator
889  		 * that it needs to unmap the area, just set it to 0.
890  		 */
891  		ctx->mmap_size = 0;
892  		kill_ioctx(mm, ctx, &wait);
893  	}
894  
895  	if (!atomic_sub_and_test(skipped, &wait.count)) {
896  		/* Wait until all IO for the context are done. */
897  		wait_for_completion(&wait.comp);
898  	}
899  
900  	RCU_INIT_POINTER(mm->ioctx_table, NULL);
901  	kfree(table);
902  }
903  
904  static void put_reqs_available(struct kioctx *ctx, unsigned nr)
905  {
906  	struct kioctx_cpu *kcpu;
907  	unsigned long flags;
908  
909  	local_irq_save(flags);
910  	kcpu = this_cpu_ptr(ctx->cpu);
911  	kcpu->reqs_available += nr;
912  
913  	while (kcpu->reqs_available >= ctx->req_batch * 2) {
914  		kcpu->reqs_available -= ctx->req_batch;
915  		atomic_add(ctx->req_batch, &ctx->reqs_available);
916  	}
917  
918  	local_irq_restore(flags);
919  }
920  
921  static bool __get_reqs_available(struct kioctx *ctx)
922  {
923  	struct kioctx_cpu *kcpu;
924  	bool ret = false;
925  	unsigned long flags;
926  
927  	local_irq_save(flags);
928  	kcpu = this_cpu_ptr(ctx->cpu);
929  	if (!kcpu->reqs_available) {
930  		int old, avail = atomic_read(&ctx->reqs_available);
931  
932  		do {
933  			if (avail < ctx->req_batch)
934  				goto out;
935  
936  			old = avail;
937  			avail = atomic_cmpxchg(&ctx->reqs_available,
938  					       avail, avail - ctx->req_batch);
939  		} while (avail != old);
940  
941  		kcpu->reqs_available += ctx->req_batch;
942  	}
943  
944  	ret = true;
945  	kcpu->reqs_available--;
946  out:
947  	local_irq_restore(flags);
948  	return ret;
949  }
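
/*
 * Summary of the batching above (illustrative): each CPU caches request
 * slots in its kioctx_cpu counter.  __get_reqs_available() pulls req_batch
 * slots at once from the global ctx->reqs_available when the local cache is
 * empty, and put_reqs_available() pushes req_batch slots back whenever the
 * local cache reaches 2 * req_batch, so a CPU's stash always ends up below
 * that bound.  For example, with ctx->nr_events = 128 and 4 possible CPUs,
 * req_batch = (128 - 1) / (4 * 4) = 7, and the per-CPU stash never ends up
 * holding more than 13 slots.
 */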
950  
951  /* refill_reqs_available
952   *	Updates the reqs_available reference counts used for tracking the
953   *	number of free slots in the completion ring.  This can be called
954   *	from aio_complete() (to optimistically update reqs_available) or
955   *	from aio_get_req() (the "we're out of events" case).  It must be
956   *	called holding ctx->completion_lock.
957   */
958  static void refill_reqs_available(struct kioctx *ctx, unsigned head,
959                                    unsigned tail)
960  {
961  	unsigned events_in_ring, completed;
962  
963  	/* Clamp head since userland can write to it. */
964  	head %= ctx->nr_events;
965  	if (head <= tail)
966  		events_in_ring = tail - head;
967  	else
968  		events_in_ring = ctx->nr_events - (head - tail);
969  
970  	completed = ctx->completed_events;
971  	if (events_in_ring < completed)
972  		completed -= events_in_ring;
973  	else
974  		completed = 0;
975  
976  	if (!completed)
977  		return;
978  
979  	ctx->completed_events -= completed;
980  	put_reqs_available(ctx, completed);
981  }
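
/*
 * Worked example (illustrative): with ctx->nr_events = 128, head = 120 and
 * tail = 10, the ring currently holds 128 - (120 - 10) = 18 events.  If
 * ctx->completed_events is 25, then 25 - 18 = 7 completions must already
 * have been consumed by userspace, so 7 slots are handed back through
 * put_reqs_available() and completed_events drops to 18.
 */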
982  
983  /* user_refill_reqs_available
984   *	Called to refill reqs_available when aio_get_req() runs out of
985   *	space in the completion ring.
986   */
987  static void user_refill_reqs_available(struct kioctx *ctx)
988  {
989  	spin_lock_irq(&ctx->completion_lock);
990  	if (ctx->completed_events) {
991  		struct aio_ring *ring;
992  		unsigned head;
993  
994  		/* Access of ring->head may race with aio_read_events_ring()
995  		 * here, but that's okay: whether we read the old version
996  		 * or the new version, either will be valid.  The important
997  		 * part is that head cannot pass tail since we prevent
998  		 * aio_complete() from updating tail by holding
999  		 * ctx->completion_lock.  Even if head is invalid, the check
1000  		 * against ctx->completed_events below will make sure we do the
1001  		 * safe/right thing.
1002  		 */
1003  		ring = kmap_atomic(ctx->ring_pages[0]);
1004  		head = ring->head;
1005  		kunmap_atomic(ring);
1006  
1007  		refill_reqs_available(ctx, head, ctx->tail);
1008  	}
1009  
1010  	spin_unlock_irq(&ctx->completion_lock);
1011  }
1012  
1013  static bool get_reqs_available(struct kioctx *ctx)
1014  {
1015  	if (__get_reqs_available(ctx))
1016  		return true;
1017  	user_refill_reqs_available(ctx);
1018  	return __get_reqs_available(ctx);
1019  }
1020  
1021  /* aio_get_req
1022   *	Allocate a slot for an aio request.
1023   * Returns NULL if no requests are free.
1024   *
1025   * The refcount is initialized to 2 - one for the async op completion,
1026   * one for the synchronous code that does this.
1027   */
1028  static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1029  {
1030  	struct aio_kiocb *req;
1031  
1032  	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1033  	if (unlikely(!req))
1034  		return NULL;
1035  
1036  	if (unlikely(!get_reqs_available(ctx))) {
1037  		kmem_cache_free(kiocb_cachep, req);
1038  		return NULL;
1039  	}
1040  
1041  	percpu_ref_get(&ctx->reqs);
1042  	req->ki_ctx = ctx;
1043  	INIT_LIST_HEAD(&req->ki_list);
1044  	refcount_set(&req->ki_refcnt, 2);
1045  	req->ki_eventfd = NULL;
1046  	return req;
1047  }
1048  
1049  static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1050  {
1051  	struct aio_ring __user *ring  = (void __user *)ctx_id;
1052  	struct mm_struct *mm = current->mm;
1053  	struct kioctx *ctx, *ret = NULL;
1054  	struct kioctx_table *table;
1055  	unsigned id;
1056  
1057  	if (get_user(id, &ring->id))
1058  		return NULL;
1059  
1060  	rcu_read_lock();
1061  	table = rcu_dereference(mm->ioctx_table);
1062  
1063  	if (!table || id >= table->nr)
1064  		goto out;
1065  
1066  	id = array_index_nospec(id, table->nr);
1067  	ctx = rcu_dereference(table->table[id]);
1068  	if (ctx && ctx->user_id == ctx_id) {
1069  		if (percpu_ref_tryget_live(&ctx->users))
1070  			ret = ctx;
1071  	}
1072  out:
1073  	rcu_read_unlock();
1074  	return ret;
1075  }
1076  
1077  static inline void iocb_destroy(struct aio_kiocb *iocb)
1078  {
1079  	if (iocb->ki_eventfd)
1080  		eventfd_ctx_put(iocb->ki_eventfd);
1081  	if (iocb->ki_filp)
1082  		fput(iocb->ki_filp);
1083  	percpu_ref_put(&iocb->ki_ctx->reqs);
1084  	kmem_cache_free(kiocb_cachep, iocb);
1085  }
1086  
1087  /* aio_complete
1088   *	Called when the io request on the given iocb is complete.
1089   */
1090  static void aio_complete(struct aio_kiocb *iocb)
1091  {
1092  	struct kioctx	*ctx = iocb->ki_ctx;
1093  	struct aio_ring	*ring;
1094  	struct io_event	*ev_page, *event;
1095  	unsigned tail, pos, head;
1096  	unsigned long	flags;
1097  
1098  	/*
1099  	 * Add a completion event to the ring buffer. Must be done holding
1100  	 * ctx->completion_lock to prevent other code from messing with the tail
1101  	 * pointer since we might be called from irq context.
1102  	 */
1103  	spin_lock_irqsave(&ctx->completion_lock, flags);
1104  
1105  	tail = ctx->tail;
1106  	pos = tail + AIO_EVENTS_OFFSET;
1107  
1108  	if (++tail >= ctx->nr_events)
1109  		tail = 0;
1110  
1111  	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1112  	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1113  
1114  	*event = iocb->ki_res;
1115  
1116  	kunmap_atomic(ev_page);
1117  	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1118  
1119  	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1120  		 (void __user *)(unsigned long)iocb->ki_res.obj,
1121  		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1122  
1123  	/* after flagging the request as done, we
1124  	 * must never even look at it again
1125  	 */
1126  	smp_wmb();	/* make event visible before updating tail */
1127  
1128  	ctx->tail = tail;
1129  
1130  	ring = kmap_atomic(ctx->ring_pages[0]);
1131  	head = ring->head;
1132  	ring->tail = tail;
1133  	kunmap_atomic(ring);
1134  	flush_dcache_page(ctx->ring_pages[0]);
1135  
1136  	ctx->completed_events++;
1137  	if (ctx->completed_events > 1)
1138  		refill_reqs_available(ctx, head, tail);
1139  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1140  
1141  	pr_debug("added to ring %p at [%u]\n", iocb, tail);
1142  
1143  	/*
1144  	 * Check if the user asked us to deliver the result through an
1145  	 * eventfd. The eventfd_signal() function is safe to call
1146  	 * from IRQ context.
1147  	 */
1148  	if (iocb->ki_eventfd)
1149  		eventfd_signal(iocb->ki_eventfd, 1);
1150  
1151  	/*
1152  	 * We have to order our ring tail store above and the test
1153  	 * of the wait list below outside the wait lock.  This is
1154  	 * like in wake_up_bit() where clearing a bit has to be
1155  	 * ordered with the unlocked test.
1156  	 */
1157  	smp_mb();
1158  
1159  	if (waitqueue_active(&ctx->wait))
1160  		wake_up(&ctx->wait);
1161  }
1162  
1163  static inline void iocb_put(struct aio_kiocb *iocb)
1164  {
1165  	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1166  		aio_complete(iocb);
1167  		iocb_destroy(iocb);
1168  	}
1169  }
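
/*
 * Refcounting summary (illustrative): aio_get_req() starts every aio_kiocb
 * with a refcount of 2.  The submission path drops one reference in
 * io_submit_one() once it is done looking at the request, and the completion
 * path (aio_complete_rw(), aio_fsync_work(), or the poll wakeup/work
 * handlers) drops the other.  Whichever side drops the last reference is the
 * one that writes the io_event into the ring and frees the request.
 */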
1170  
1171  /* aio_read_events_ring
1172   *	Pull events off of the ioctx's event ring.  Returns the number of
1173   *	events fetched.
1174   */
1175  static long aio_read_events_ring(struct kioctx *ctx,
1176  				 struct io_event __user *event, long nr)
1177  {
1178  	struct aio_ring *ring;
1179  	unsigned head, tail, pos;
1180  	long ret = 0;
1181  	int copy_ret;
1182  
1183  	/*
1184  	 * The mutex can block and wake us up and that will cause
1185  	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1186  	 * and repeat. This should be rare enough that it doesn't cause
1187  	 * performance issues. See the comment in read_events() for more detail.
1188  	 */
1189  	sched_annotate_sleep();
1190  	mutex_lock(&ctx->ring_lock);
1191  
1192  	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
1193  	ring = kmap_atomic(ctx->ring_pages[0]);
1194  	head = ring->head;
1195  	tail = ring->tail;
1196  	kunmap_atomic(ring);
1197  
1198  	/*
1199  	 * Ensure that once we've read the current tail pointer, we
1200  	 * also see the events that were stored up to the tail.
1201  	 */
1202  	smp_rmb();
1203  
1204  	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1205  
1206  	if (head == tail)
1207  		goto out;
1208  
1209  	head %= ctx->nr_events;
1210  	tail %= ctx->nr_events;
1211  
1212  	while (ret < nr) {
1213  		long avail;
1214  		struct io_event *ev;
1215  		struct page *page;
1216  
1217  		avail = (head <= tail ?  tail : ctx->nr_events) - head;
1218  		if (head == tail)
1219  			break;
1220  
1221  		pos = head + AIO_EVENTS_OFFSET;
1222  		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1223  		pos %= AIO_EVENTS_PER_PAGE;
1224  
1225  		avail = min(avail, nr - ret);
1226  		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1227  
1228  		ev = kmap(page);
1229  		copy_ret = copy_to_user(event + ret, ev + pos,
1230  					sizeof(*ev) * avail);
1231  		kunmap(page);
1232  
1233  		if (unlikely(copy_ret)) {
1234  			ret = -EFAULT;
1235  			goto out;
1236  		}
1237  
1238  		ret += avail;
1239  		head += avail;
1240  		head %= ctx->nr_events;
1241  	}
1242  
1243  	ring = kmap_atomic(ctx->ring_pages[0]);
1244  	ring->head = head;
1245  	kunmap_atomic(ring);
1246  	flush_dcache_page(ctx->ring_pages[0]);
1247  
1248  	pr_debug("%li  h%u t%u\n", ret, head, tail);
1249  out:
1250  	mutex_unlock(&ctx->ring_lock);
1251  
1252  	return ret;
1253  }
1254  
1255  static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1256  			    struct io_event __user *event, long *i)
1257  {
1258  	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1259  
1260  	if (ret > 0)
1261  		*i += ret;
1262  
1263  	if (unlikely(atomic_read(&ctx->dead)))
1264  		ret = -EINVAL;
1265  
1266  	if (!*i)
1267  		*i = ret;
1268  
1269  	return ret < 0 || *i >= min_nr;
1270  }
1271  
1272  static long read_events(struct kioctx *ctx, long min_nr, long nr,
1273  			struct io_event __user *event,
1274  			ktime_t until)
1275  {
1276  	long ret = 0;
1277  
1278  	/*
1279  	 * Note that aio_read_events() is being called as the conditional - i.e.
1280  	 * we're calling it after prepare_to_wait() has set task state to
1281  	 * TASK_INTERRUPTIBLE.
1282  	 *
1283  	 * But aio_read_events() can block, and if it blocks it's going to flip
1284  	 * the task state back to TASK_RUNNING.
1285  	 *
1286  	 * This should be ok, provided it doesn't flip the state back to
1287  	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1288  	 * will only happen if the mutex_lock() call blocks, and we then find
1289  	 * the ringbuffer empty. So in practice we should be ok, but it's
1290  	 * something to be aware of when touching this code.
1291  	 */
1292  	if (until == 0)
1293  		aio_read_events(ctx, min_nr, nr, event, &ret);
1294  	else
1295  		wait_event_interruptible_hrtimeout(ctx->wait,
1296  				aio_read_events(ctx, min_nr, nr, event, &ret),
1297  				until);
1298  	return ret;
1299  }
1300  
1301  /* sys_io_setup:
1302   *	Create an aio_context capable of receiving at least nr_events.
1303   *	ctxp must not point to an aio_context that already exists, and
1304   *	must be initialized to 0 prior to the call.  On successful
1305   *	creation of the aio_context, *ctxp is filled in with the resulting
1306   *	handle.  May fail with -EINVAL if *ctxp is not initialized,
1307   *	or if the specified nr_events exceeds internal limits.  May fail
1308   *	with -EAGAIN if the specified nr_events exceeds the user's limit
1309   *	of available events.  May fail with -ENOMEM if insufficient kernel
1310   *	resources are available.  May fail with -EFAULT if an invalid
1311   *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
1312   *	implemented.
1313   */
1314  SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1315  {
1316  	struct kioctx *ioctx = NULL;
1317  	unsigned long ctx;
1318  	long ret;
1319  
1320  	ret = get_user(ctx, ctxp);
1321  	if (unlikely(ret))
1322  		goto out;
1323  
1324  	ret = -EINVAL;
1325  	if (unlikely(ctx || nr_events == 0)) {
1326  		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1327  		         ctx, nr_events);
1328  		goto out;
1329  	}
1330  
1331  	ioctx = ioctx_alloc(nr_events);
1332  	ret = PTR_ERR(ioctx);
1333  	if (!IS_ERR(ioctx)) {
1334  		ret = put_user(ioctx->user_id, ctxp);
1335  		if (ret)
1336  			kill_ioctx(current->mm, ioctx, NULL);
1337  		percpu_ref_put(&ioctx->users);
1338  	}
1339  
1340  out:
1341  	return ret;
1342  }
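
/*
 * Illustrative user-space sketch (not part of the kernel source, error
 * handling omitted): the raw syscalls are typically driven like this,
 * using the definitions from <linux/aio_abi.h>:
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	syscall(SYS_io_setup, 128, &ctx);
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_offset = 0;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 *	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
 *	syscall(SYS_io_destroy, ctx);
 *
 * where fd and buf are assumed to be an open file descriptor and a suitably
 * sized buffer.  Most applications use the libaio wrappers instead.
 */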
1343  
1344  #ifdef CONFIG_COMPAT
1345  COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1346  {
1347  	struct kioctx *ioctx = NULL;
1348  	unsigned long ctx;
1349  	long ret;
1350  
1351  	ret = get_user(ctx, ctx32p);
1352  	if (unlikely(ret))
1353  		goto out;
1354  
1355  	ret = -EINVAL;
1356  	if (unlikely(ctx || nr_events == 0)) {
1357  		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1358  		         ctx, nr_events);
1359  		goto out;
1360  	}
1361  
1362  	ioctx = ioctx_alloc(nr_events);
1363  	ret = PTR_ERR(ioctx);
1364  	if (!IS_ERR(ioctx)) {
1365  		/* truncating is ok because it's a user address */
1366  		ret = put_user((u32)ioctx->user_id, ctx32p);
1367  		if (ret)
1368  			kill_ioctx(current->mm, ioctx, NULL);
1369  		percpu_ref_put(&ioctx->users);
1370  	}
1371  
1372  out:
1373  	return ret;
1374  }
1375  #endif
1376  
1377  /* sys_io_destroy:
1378   *	Destroy the aio_context specified.  May cancel any outstanding
1379   *	AIOs and block on completion.  Will fail with -ENOSYS if not
1380   *	implemented.  May fail with -EINVAL if the context pointed to
1381   *	is invalid.
1382   */
1383  SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1384  {
1385  	struct kioctx *ioctx = lookup_ioctx(ctx);
1386  	if (likely(NULL != ioctx)) {
1387  		struct ctx_rq_wait wait;
1388  		int ret;
1389  
1390  		init_completion(&wait.comp);
1391  		atomic_set(&wait.count, 1);
1392  
1393  		/* Pass the wait descriptor to kill_ioctx() where it can be set
1394  		 * in a thread-safe way. If we try to set it here then we have
1395  		 * a race condition if two io_destroy() calls run simultaneously.
1396  		 */
1397  		ret = kill_ioctx(current->mm, ioctx, &wait);
1398  		percpu_ref_put(&ioctx->users);
1399  
1400  		/* Wait until all IO for the context is done. Otherwise the
1401  		 * kernel keeps using user-space buffers even if the user thinks
1402  		 * the context is destroyed.
1403  		 */
1404  		if (!ret)
1405  			wait_for_completion(&wait.comp);
1406  
1407  		return ret;
1408  	}
1409  	pr_debug("EINVAL: invalid context id\n");
1410  	return -EINVAL;
1411  }
1412  
1413  static void aio_remove_iocb(struct aio_kiocb *iocb)
1414  {
1415  	struct kioctx *ctx = iocb->ki_ctx;
1416  	unsigned long flags;
1417  
1418  	spin_lock_irqsave(&ctx->ctx_lock, flags);
1419  	list_del(&iocb->ki_list);
1420  	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1421  }
1422  
1423  static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1424  {
1425  	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1426  
1427  	if (!list_empty_careful(&iocb->ki_list))
1428  		aio_remove_iocb(iocb);
1429  
1430  	if (kiocb->ki_flags & IOCB_WRITE) {
1431  		struct inode *inode = file_inode(kiocb->ki_filp);
1432  
1433  		/*
1434  		 * Tell lockdep we inherited freeze protection from submission
1435  		 * thread.
1436  		 */
1437  		if (S_ISREG(inode->i_mode))
1438  			__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1439  		file_end_write(kiocb->ki_filp);
1440  	}
1441  
1442  	iocb->ki_res.res = res;
1443  	iocb->ki_res.res2 = res2;
1444  	iocb_put(iocb);
1445  }
1446  
1447  static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1448  {
1449  	int ret;
1450  
1451  	req->ki_complete = aio_complete_rw;
1452  	req->private = NULL;
1453  	req->ki_pos = iocb->aio_offset;
1454  	req->ki_flags = iocb_flags(req->ki_filp);
1455  	if (iocb->aio_flags & IOCB_FLAG_RESFD)
1456  		req->ki_flags |= IOCB_EVENTFD;
1457  	req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
1458  	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1459  		/*
1460  		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1461  		 * aio_reqprio is interpreted as an I/O scheduling
1462  		 * class and priority.
1463  		 */
1464  		ret = ioprio_check_cap(iocb->aio_reqprio);
1465  		if (ret) {
1466  			pr_debug("aio ioprio check cap error: %d\n", ret);
1467  			return ret;
1468  		}
1469  
1470  		req->ki_ioprio = iocb->aio_reqprio;
1471  	} else
1472  		req->ki_ioprio = get_current_ioprio();
1473  
1474  	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1475  	if (unlikely(ret))
1476  		return ret;
1477  
1478  	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1479  	return 0;
1480  }
1481  
1482  static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1483  		struct iovec **iovec, bool vectored, bool compat,
1484  		struct iov_iter *iter)
1485  {
1486  	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1487  	size_t len = iocb->aio_nbytes;
1488  
1489  	if (!vectored) {
1490  		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1491  		*iovec = NULL;
1492  		return ret;
1493  	}
1494  
1495  	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1496  }
1497  
1498  static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1499  {
1500  	switch (ret) {
1501  	case -EIOCBQUEUED:
1502  		break;
1503  	case -ERESTARTSYS:
1504  	case -ERESTARTNOINTR:
1505  	case -ERESTARTNOHAND:
1506  	case -ERESTART_RESTARTBLOCK:
1507  		/*
1508  		 * There's no easy way to restart the syscall since other AIOs
1509  		 * may already be running. Just fail this IO with EINTR.
1510  		 */
1511  		ret = -EINTR;
1512  		fallthrough;
1513  	default:
1514  		req->ki_complete(req, ret, 0);
1515  	}
1516  }
1517  
1518  static int aio_read(struct kiocb *req, const struct iocb *iocb,
1519  			bool vectored, bool compat)
1520  {
1521  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1522  	struct iov_iter iter;
1523  	struct file *file;
1524  	int ret;
1525  
1526  	ret = aio_prep_rw(req, iocb);
1527  	if (ret)
1528  		return ret;
1529  	file = req->ki_filp;
1530  	if (unlikely(!(file->f_mode & FMODE_READ)))
1531  		return -EBADF;
1532  	ret = -EINVAL;
1533  	if (unlikely(!file->f_op->read_iter))
1534  		return -EINVAL;
1535  
1536  	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1537  	if (ret < 0)
1538  		return ret;
1539  	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1540  	if (!ret)
1541  		aio_rw_done(req, call_read_iter(file, req, &iter));
1542  	kfree(iovec);
1543  	return ret;
1544  }
1545  
1546  static int aio_write(struct kiocb *req, const struct iocb *iocb,
1547  			 bool vectored, bool compat)
1548  {
1549  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1550  	struct iov_iter iter;
1551  	struct file *file;
1552  	int ret;
1553  
1554  	ret = aio_prep_rw(req, iocb);
1555  	if (ret)
1556  		return ret;
1557  	file = req->ki_filp;
1558  
1559  	if (unlikely(!(file->f_mode & FMODE_WRITE)))
1560  		return -EBADF;
1561  	if (unlikely(!file->f_op->write_iter))
1562  		return -EINVAL;
1563  
1564  	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1565  	if (ret < 0)
1566  		return ret;
1567  	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1568  	if (!ret) {
1569  		/*
1570  		 * Open-code file_start_write here to grab freeze protection,
1571  		 * which will be released by another thread in
1572  		 * aio_complete_rw().  Fool lockdep by telling it the lock got
1573  		 * released so that it doesn't complain about the held lock when
1574  		 * we return to userspace.
1575  		 */
1576  		if (S_ISREG(file_inode(file)->i_mode)) {
1577  			sb_start_write(file_inode(file)->i_sb);
1578  			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1579  		}
1580  		req->ki_flags |= IOCB_WRITE;
1581  		aio_rw_done(req, call_write_iter(file, req, &iter));
1582  	}
1583  	kfree(iovec);
1584  	return ret;
1585  }
1586  
1587  static void aio_fsync_work(struct work_struct *work)
1588  {
1589  	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1590  	const struct cred *old_cred = override_creds(iocb->fsync.creds);
1591  
1592  	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1593  	revert_creds(old_cred);
1594  	put_cred(iocb->fsync.creds);
1595  	iocb_put(iocb);
1596  }
1597  
1598  static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1599  		     bool datasync)
1600  {
1601  	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1602  			iocb->aio_rw_flags))
1603  		return -EINVAL;
1604  
1605  	if (unlikely(!req->file->f_op->fsync))
1606  		return -EINVAL;
1607  
1608  	req->creds = prepare_creds();
1609  	if (!req->creds)
1610  		return -ENOMEM;
1611  
1612  	req->datasync = datasync;
1613  	INIT_WORK(&req->work, aio_fsync_work);
1614  	schedule_work(&req->work);
1615  	return 0;
1616  }
1617  
1618  static void aio_poll_put_work(struct work_struct *work)
1619  {
1620  	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1621  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1622  
1623  	iocb_put(iocb);
1624  }
1625  
1626  static void aio_poll_complete_work(struct work_struct *work)
1627  {
1628  	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1629  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1630  	struct poll_table_struct pt = { ._key = req->events };
1631  	struct kioctx *ctx = iocb->ki_ctx;
1632  	__poll_t mask = 0;
1633  
1634  	if (!READ_ONCE(req->cancelled))
1635  		mask = vfs_poll(req->file, &pt) & req->events;
1636  
1637  	/*
1638  	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1639  	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
1640  	 * synchronize with them.  In the cancellation case the list_del_init
1641  	 * itself is not actually needed, but harmless so we keep it in to
1642  	 * avoid further branches in the fast path.
1643  	 */
1644  	spin_lock_irq(&ctx->ctx_lock);
1645  	if (!mask && !READ_ONCE(req->cancelled)) {
1646  		add_wait_queue(req->head, &req->wait);
1647  		spin_unlock_irq(&ctx->ctx_lock);
1648  		return;
1649  	}
1650  	list_del_init(&iocb->ki_list);
1651  	iocb->ki_res.res = mangle_poll(mask);
1652  	req->done = true;
1653  	spin_unlock_irq(&ctx->ctx_lock);
1654  
1655  	iocb_put(iocb);
1656  }
1657  
1658  /* assumes we are called with irqs disabled */
1659  static int aio_poll_cancel(struct kiocb *iocb)
1660  {
1661  	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1662  	struct poll_iocb *req = &aiocb->poll;
1663  
1664  	spin_lock(&req->head->lock);
1665  	WRITE_ONCE(req->cancelled, true);
1666  	if (!list_empty(&req->wait.entry)) {
1667  		list_del_init(&req->wait.entry);
1668  		schedule_work(&aiocb->poll.work);
1669  	}
1670  	spin_unlock(&req->head->lock);
1671  
1672  	return 0;
1673  }
1674  
1675  static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1676  		void *key)
1677  {
1678  	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1679  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1680  	__poll_t mask = key_to_poll(key);
1681  	unsigned long flags;
1682  
1683  	/* for instances that support it, check for an event match first: */
1684  	if (mask && !(mask & req->events))
1685  		return 0;
1686  
1687  	list_del_init(&req->wait.entry);
1688  
1689  	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1690  		struct kioctx *ctx = iocb->ki_ctx;
1691  
1692  		/*
1693  		 * Try to complete the iocb inline if we can. Use
1694  		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
1695  		 * call this function with IRQs disabled and because IRQs
1696  		 * have to be disabled before ctx_lock is obtained.
1697  		 */
1698  		list_del(&iocb->ki_list);
1699  		iocb->ki_res.res = mangle_poll(mask);
1700  		req->done = true;
1701  		if (iocb->ki_eventfd && eventfd_signal_count()) {
1702  			iocb = NULL;
1703  			INIT_WORK(&req->work, aio_poll_put_work);
1704  			schedule_work(&req->work);
1705  		}
1706  		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1707  		if (iocb)
1708  			iocb_put(iocb);
1709  	} else {
1710  		schedule_work(&req->work);
1711  	}
1712  	return 1;
1713  }
1714  
1715  struct aio_poll_table {
1716  	struct poll_table_struct	pt;
1717  	struct aio_kiocb		*iocb;
1718  	int				error;
1719  };
1720  
1721  static void
1722  aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1723  		struct poll_table_struct *p)
1724  {
1725  	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1726  
1727  	/* multiple wait queues per file are not supported */
1728  	if (unlikely(pt->iocb->poll.head)) {
1729  		pt->error = -EINVAL;
1730  		return;
1731  	}
1732  
1733  	pt->error = 0;
1734  	pt->iocb->poll.head = head;
1735  	add_wait_queue(head, &pt->iocb->poll.wait);
1736  }
1737  
1738  static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1739  {
1740  	struct kioctx *ctx = aiocb->ki_ctx;
1741  	struct poll_iocb *req = &aiocb->poll;
1742  	struct aio_poll_table apt;
1743  	bool cancel = false;
1744  	__poll_t mask;
1745  
1746  	/* reject any unknown events outside the normal event mask. */
1747  	if ((u16)iocb->aio_buf != iocb->aio_buf)
1748  		return -EINVAL;
1749  	/* reject fields that are not defined for poll */
1750  	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1751  		return -EINVAL;
1752  
1753  	INIT_WORK(&req->work, aio_poll_complete_work);
1754  	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1755  
1756  	req->head = NULL;
1757  	req->done = false;
1758  	req->cancelled = false;
1759  
1760  	apt.pt._qproc = aio_poll_queue_proc;
1761  	apt.pt._key = req->events;
1762  	apt.iocb = aiocb;
1763  	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1764  
1765  	/* initialize the list so that we can do list_empty checks */
1766  	INIT_LIST_HEAD(&req->wait.entry);
1767  	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1768  
1769  	mask = vfs_poll(req->file, &apt.pt) & req->events;
1770  	spin_lock_irq(&ctx->ctx_lock);
1771  	if (likely(req->head)) {
1772  		spin_lock(&req->head->lock);
1773  		if (unlikely(list_empty(&req->wait.entry))) {
1774  			if (apt.error)
1775  				cancel = true;
1776  			apt.error = 0;
1777  			mask = 0;
1778  		}
1779  		if (mask || apt.error) {
1780  			list_del_init(&req->wait.entry);
1781  		} else if (cancel) {
1782  			WRITE_ONCE(req->cancelled, true);
1783  		} else if (!req->done) { /* actually waiting for an event */
1784  			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1785  			aiocb->ki_cancel = aio_poll_cancel;
1786  		}
1787  		spin_unlock(&req->head->lock);
1788  	}
1789  	if (mask) { /* no async, we'd stolen it */
1790  		aiocb->ki_res.res = mangle_poll(mask);
1791  		apt.error = 0;
1792  	}
1793  	spin_unlock_irq(&ctx->ctx_lock);
1794  	if (mask)
1795  		iocb_put(aiocb);
1796  	return apt.error;
1797  }
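
/*
 * Illustrative summary of the paths out of aio_poll(): if vfs_poll() already
 * reported one of the requested events, the iocb is completed inline (the
 * "mask" case above) without ever parking on the wait queue.  If nothing is
 * pending yet, the request stays on the file's wait queue and on
 * ctx->active_reqs with aio_poll_cancel() registered, and is finished later
 * by aio_poll_wake() or aio_poll_complete_work().  An error from the
 * queueing step is returned straight to the submitter.
 */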
1798  
1799  static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1800  			   struct iocb __user *user_iocb, struct aio_kiocb *req,
1801  			   bool compat)
1802  {
1803  	req->ki_filp = fget(iocb->aio_fildes);
1804  	if (unlikely(!req->ki_filp))
1805  		return -EBADF;
1806  
1807  	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1808  		struct eventfd_ctx *eventfd;
1809  		/*
1810  		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1811  		 * instance of the file* now. The file descriptor must be
1812  		 * an eventfd() fd, and will be signaled for each completed
1813  		 * event using the eventfd_signal() function.
1814  		 */
1815  		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1816  		if (IS_ERR(eventfd))
1817  			return PTR_ERR(eventfd);
1818  
1819  		req->ki_eventfd = eventfd;
1820  	}
1821  
1822  	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1823  		pr_debug("EFAULT: aio_key\n");
1824  		return -EFAULT;
1825  	}
1826  
1827  	req->ki_res.obj = (u64)(unsigned long)user_iocb;
1828  	req->ki_res.data = iocb->aio_data;
1829  	req->ki_res.res = 0;
1830  	req->ki_res.res2 = 0;
1831  
1832  	switch (iocb->aio_lio_opcode) {
1833  	case IOCB_CMD_PREAD:
1834  		return aio_read(&req->rw, iocb, false, compat);
1835  	case IOCB_CMD_PWRITE:
1836  		return aio_write(&req->rw, iocb, false, compat);
1837  	case IOCB_CMD_PREADV:
1838  		return aio_read(&req->rw, iocb, true, compat);
1839  	case IOCB_CMD_PWRITEV:
1840  		return aio_write(&req->rw, iocb, true, compat);
1841  	case IOCB_CMD_FSYNC:
1842  		return aio_fsync(&req->fsync, iocb, false);
1843  	case IOCB_CMD_FDSYNC:
1844  		return aio_fsync(&req->fsync, iocb, true);
1845  	case IOCB_CMD_POLL:
1846  		return aio_poll(req, iocb);
1847  	default:
1848  		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1849  		return -EINVAL;
1850  	}
1851  }
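
/*
 * The IOCB_FLAG_RESFD path above ties completion notification to an
 * eventfd: each completed iocb adds 1 to the eventfd counter, so the
 * eventfd can be read (or polled) to learn that finished events are
 * waiting in the ring.  A rough userspace sketch, assuming raw syscalls
 * and that "ctx", "datafd" and "buf" were set up elsewhere:
 *
 *	#include <linux/aio_abi.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int efd = eventfd(0, 0);
 *	struct iocb cb;
 *	struct iocb *cbs[1] = { &cb };
 *	uint64_t nr_done;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = datafd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (uint64_t)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_flags = IOCB_FLAG_RESFD;
 *	cb.aio_resfd = efd;
 *
 *	syscall(__NR_io_submit, ctx, 1, cbs);
 *	read(efd, &nr_done, sizeof(nr_done));
 */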
1852  
1853  static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1854  			 bool compat)
1855  {
1856  	struct aio_kiocb *req;
1857  	struct iocb iocb;
1858  	int err;
1859  
1860  	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
1861  		return -EFAULT;
1862  
1863  	/* enforce forwards compatibility on users */
1864  	if (unlikely(iocb.aio_reserved2)) {
1865  		pr_debug("EINVAL: reserved field set\n");
1866  		return -EINVAL;
1867  	}
1868  
1869  	/* prevent overflows */
1870  	if (unlikely(
1871  	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
1872  	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
1873  	    ((ssize_t)iocb.aio_nbytes < 0)
1874  	   )) {
1875  		pr_debug("EINVAL: overflow check\n");
1876  		return -EINVAL;
1877  	}
1878  
1879  	req = aio_get_req(ctx);
1880  	if (unlikely(!req))
1881  		return -EAGAIN;
1882  
1883  	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
1884  
1885  	/* Done with the synchronous reference */
1886  	iocb_put(req);
1887  
1888  	/*
1889  	 * If err is 0, we've either done aio_complete() ourselves or have
1890  	 * arranged for that to be done asynchronously.  Anything non-zero
1891  	 * means that we need to destroy req ourselves.
1892  	 */
1893  	if (unlikely(err)) {
1894  		iocb_destroy(req);
1895  		put_reqs_available(ctx, 1);
1896  	}
1897  	return err;
1898  }
1899  
1900  /* sys_io_submit:
1901   *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
1902   *	the number of iocbs queued.  May return -EINVAL if the aio_context
1903   *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
1904   *	*iocbpp[0] is not properly initialized, or if the operation specified
1905   *	is invalid for the file descriptor in the iocb.  May fail with
1906   *	-EFAULT if any of the data structures point to invalid data.  May
1907   *	fail with -EBADF if the file descriptor specified in the first
1908   *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
1909   *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
1910   *	fail with -ENOSYS if not implemented.
1911   */
1912  SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1913  		struct iocb __user * __user *, iocbpp)
1914  {
1915  	struct kioctx *ctx;
1916  	long ret = 0;
1917  	int i = 0;
1918  	struct blk_plug plug;
1919  
1920  	if (unlikely(nr < 0))
1921  		return -EINVAL;
1922  
1923  	ctx = lookup_ioctx(ctx_id);
1924  	if (unlikely(!ctx)) {
1925  		pr_debug("EINVAL: invalid context id\n");
1926  		return -EINVAL;
1927  	}
1928  
1929  	if (nr > ctx->nr_events)
1930  		nr = ctx->nr_events;
1931  
1932  	if (nr > AIO_PLUG_THRESHOLD)
1933  		blk_start_plug(&plug);
1934  	for (i = 0; i < nr; i++) {
1935  		struct iocb __user *user_iocb;
1936  
1937  		if (unlikely(get_user(user_iocb, iocbpp + i))) {
1938  			ret = -EFAULT;
1939  			break;
1940  		}
1941  
1942  		ret = io_submit_one(ctx, user_iocb, false);
1943  		if (ret)
1944  			break;
1945  	}
1946  	if (nr > AIO_PLUG_THRESHOLD)
1947  		blk_finish_plug(&plug);
1948  
1949  	percpu_ref_put(&ctx->users);
1950  	return i ? i : ret;
1951  }
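
/*
 * Because the return value is the number of iocbs queued (and nr is
 * clamped to the ring size above), userspace generally has to treat a
 * short return as partial success and resubmit the remainder.  Rough
 * sketch, raw syscall(2) wrapper, with "ctx", "cbs" and "nr" assumed:
 *
 *	long done = 0;
 *
 *	while (done < nr) {
 *		long ret = syscall(__NR_io_submit, ctx, nr - done,
 *				   &cbs[done]);
 *
 *		if (ret < 0) {
 *			.. cbs[done] could not be queued, errno says why ..
 *			break;
 *		}
 *		done += ret;
 *	}
 */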
1952  
1953  #ifdef CONFIG_COMPAT
1954  COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
1955  		       int, nr, compat_uptr_t __user *, iocbpp)
1956  {
1957  	struct kioctx *ctx;
1958  	long ret = 0;
1959  	int i = 0;
1960  	struct blk_plug plug;
1961  
1962  	if (unlikely(nr < 0))
1963  		return -EINVAL;
1964  
1965  	ctx = lookup_ioctx(ctx_id);
1966  	if (unlikely(!ctx)) {
1967  		pr_debug("EINVAL: invalid context id\n");
1968  		return -EINVAL;
1969  	}
1970  
1971  	if (nr > ctx->nr_events)
1972  		nr = ctx->nr_events;
1973  
1974  	if (nr > AIO_PLUG_THRESHOLD)
1975  		blk_start_plug(&plug);
1976  	for (i = 0; i < nr; i++) {
1977  		compat_uptr_t user_iocb;
1978  
1979  		if (unlikely(get_user(user_iocb, iocbpp + i))) {
1980  			ret = -EFAULT;
1981  			break;
1982  		}
1983  
1984  		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
1985  		if (ret)
1986  			break;
1987  	}
1988  	if (nr > AIO_PLUG_THRESHOLD)
1989  		blk_finish_plug(&plug);
1990  
1991  	percpu_ref_put(&ctx->users);
1992  	return i ? i : ret;
1993  }
1994  #endif
1995  
1996  /* sys_io_cancel:
1997   *	Attempts to cancel an iocb previously passed to io_submit.  If
1998   *	the operation is successfully cancelled, the resulting event is
1999   *	copied into the memory pointed to by result without being placed
2000   *	into the completion queue and 0 is returned.  May fail with
2001   *	-EFAULT if any of the data structures pointed to are invalid.
2002   *	May fail with -EINVAL if aio_context specified by ctx_id is
2003   *	invalid.  May fail with -EAGAIN if the iocb specified was not
2004   *	cancelled.  Will fail with -ENOSYS if not implemented.
2005   */
2006  SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2007  		struct io_event __user *, result)
2008  {
2009  	struct kioctx *ctx;
2010  	struct aio_kiocb *kiocb;
2011  	int ret = -EINVAL;
2012  	u32 key;
2013  	u64 obj = (u64)(unsigned long)iocb;
2014  
2015  	if (unlikely(get_user(key, &iocb->aio_key)))
2016  		return -EFAULT;
2017  	if (unlikely(key != KIOCB_KEY))
2018  		return -EINVAL;
2019  
2020  	ctx = lookup_ioctx(ctx_id);
2021  	if (unlikely(!ctx))
2022  		return -EINVAL;
2023  
2024  	spin_lock_irq(&ctx->ctx_lock);
2025  	/* TODO: use a hash or array, this sucks. */
2026  	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2027  		if (kiocb->ki_res.obj == obj) {
2028  			ret = kiocb->ki_cancel(&kiocb->rw);
2029  			list_del_init(&kiocb->ki_list);
2030  			break;
2031  		}
2032  	}
2033  	spin_unlock_irq(&ctx->ctx_lock);
2034  
2035  	if (!ret) {
2036  		/*
2037  		 * The result argument is no longer used - the io_event is
2038  		 * always delivered via the ring buffer. -EINPROGRESS indicates
2039  		 * cancellation is progress:
2040  		 * cancellation is in progress:
2041  		ret = -EINPROGRESS;
2042  	}
2043  
2044  	percpu_ref_put(&ctx->users);
2045  
2046  	return ret;
2047  }
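
/*
 * Userspace sketch of the above (raw syscall; "ctx" and "cb" stand for
 * the context and the originally submitted iocb).  The result argument
 * is accepted but, per the comment above, no longer written to; with
 * this implementation a request that is found reports EINPROGRESS and
 * its io_event is delivered through the ring like any other completion:
 *
 *	#include <errno.h>
 *	#include <linux/aio_abi.h>
 *
 *	struct io_event unused;
 *
 *	if (syscall(__NR_io_cancel, ctx, &cb, &unused) < 0) {
 *		if (errno == EINPROGRESS) {
 *			.. cancellation under way, reap the event with
 *			   io_getevents() as usual ..
 *		} else {
 *			.. e.g. EINVAL: unknown iocb or context ..
 *		}
 *	}
 */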
2048  
2049  static long do_io_getevents(aio_context_t ctx_id,
2050  		long min_nr,
2051  		long nr,
2052  		struct io_event __user *events,
2053  		struct timespec64 *ts)
2054  {
2055  	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2056  	struct kioctx *ioctx = lookup_ioctx(ctx_id);
2057  	long ret = -EINVAL;
2058  
2059  	if (likely(ioctx)) {
2060  		if (likely(min_nr <= nr && min_nr >= 0))
2061  			ret = read_events(ioctx, min_nr, nr, events, until);
2062  		percpu_ref_put(&ioctx->users);
2063  	}
2064  
2065  	return ret;
2066  }
2067  
2068  /* io_getevents:
2069   *	Attempts to read at least min_nr events and up to nr events from
2070   *	the completion queue for the aio_context specified by ctx_id. If
2071   *	it succeeds, the number of read events is returned. May fail with
2072   *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2073   *	out of range, if timeout is out of range.  May fail with -EFAULT
2074   *	out of range, or if timeout is out of range.  May fail with -EFAULT
2075   *	< min_nr if the timeout specified by timeout has elapsed
2076   *	before sufficient events are available, where timeout == NULL
2077   *	specifies an infinite timeout. Note that the timeout pointed to by
2078   *	timeout is relative.  Will fail with -ENOSYS if not implemented.
2079   */
2080  #ifdef CONFIG_64BIT
2081  
2082  SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2083  		long, min_nr,
2084  		long, nr,
2085  		struct io_event __user *, events,
2086  		struct __kernel_timespec __user *, timeout)
2087  {
2088  	struct timespec64	ts;
2089  	int			ret;
2090  
2091  	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2092  		return -EFAULT;
2093  
2094  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2095  	if (!ret && signal_pending(current))
2096  		ret = -EINTR;
2097  	return ret;
2098  }
2099  
2100  #endif
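
/*
 * Userspace sketch of the above (64-bit ABI assumed so that the libc
 * struct timespec matches struct __kernel_timespec; "ctx" set up by
 * io_setup() elsewhere).  The timeout is relative, NULL means wait
 * indefinitely, and fewer than min_nr events (even none) may come back
 * if the timeout expires first:
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	struct io_event evs[64];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	long got = syscall(__NR_io_getevents, ctx, 1, 64, evs, &ts);
 *
 *	For each returned event, evs[i].obj is the user iocb pointer,
 *	evs[i].data echoes aio_data and evs[i].res/res2 carry the result.
 */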
2101  
2102  struct __aio_sigset {
2103  	const sigset_t __user	*sigmask;
2104  	size_t		sigsetsize;
2105  };
2106  
2107  SYSCALL_DEFINE6(io_pgetevents,
2108  		aio_context_t, ctx_id,
2109  		long, min_nr,
2110  		long, nr,
2111  		struct io_event __user *, events,
2112  		struct __kernel_timespec __user *, timeout,
2113  		const struct __aio_sigset __user *, usig)
2114  {
2115  	struct __aio_sigset	ksig = { NULL, };
2116  	struct timespec64	ts;
2117  	bool interrupted;
2118  	int ret;
2119  
2120  	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2121  		return -EFAULT;
2122  
2123  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2124  		return -EFAULT;
2125  
2126  	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2127  	if (ret)
2128  		return ret;
2129  
2130  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2131  
2132  	interrupted = signal_pending(current);
2133  	restore_saved_sigmask_unless(interrupted);
2134  	if (interrupted && !ret)
2135  		ret = -ERESTARTNOHAND;
2136  
2137  	return ret;
2138  }
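
/*
 * Userspace sketch of the sigmask handshake (raw syscall; "ctx" set up
 * elsewhere).  The last argument mirrors struct __aio_sigset above, and
 * the size has to be the kernel's sigset size (_NSIG / 8, 8 bytes on the
 * common architectures), not the 128-byte glibc sigset_t:
 *
 *	#include <linux/aio_abi.h>
 *	#include <signal.h>
 *	#include <stddef.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct {
 *		const sigset_t *sigmask;
 *		size_t sigsetsize;
 *	} asig;
 *	sigset_t allow_usr1;
 *	struct io_event ev;
 *
 *	sigfillset(&allow_usr1);
 *	sigdelset(&allow_usr1, SIGUSR1);
 *	asig.sigmask = &allow_usr1;
 *	asig.sigsetsize = 8;
 *
 *	syscall(__NR_io_pgetevents, ctx, 1, 1, &ev, NULL, &asig);
 *
 *	The mask is installed only for the duration of the wait, as with
 *	pselect(2): if SIGUSR1 (with a handler installed) arrives before an
 *	event does, the call fails with EINTR and the original mask is back
 *	in place once the handler has run.
 */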
2139  
2140  #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2141  
2142  SYSCALL_DEFINE6(io_pgetevents_time32,
2143  		aio_context_t, ctx_id,
2144  		long, min_nr,
2145  		long, nr,
2146  		struct io_event __user *, events,
2147  		struct old_timespec32 __user *, timeout,
2148  		const struct __aio_sigset __user *, usig)
2149  {
2150  	struct __aio_sigset	ksig = { NULL, };
2151  	struct timespec64	ts;
2152  	bool interrupted;
2153  	int ret;
2154  
2155  	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2156  		return -EFAULT;
2157  
2158  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2159  		return -EFAULT;
2160  
2162  	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2163  	if (ret)
2164  		return ret;
2165  
2166  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2167  
2168  	interrupted = signal_pending(current);
2169  	restore_saved_sigmask_unless(interrupted);
2170  	if (interrupted && !ret)
2171  		ret = -ERESTARTNOHAND;
2172  
2173  	return ret;
2174  }
2175  
2176  #endif
2177  
2178  #if defined(CONFIG_COMPAT_32BIT_TIME)
2179  
2180  SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2181  		__s32, min_nr,
2182  		__s32, nr,
2183  		struct io_event __user *, events,
2184  		struct old_timespec32 __user *, timeout)
2185  {
2186  	struct timespec64 t;
2187  	int ret;
2188  
2189  	if (timeout && get_old_timespec32(&t, timeout))
2190  		return -EFAULT;
2191  
2192  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2193  	if (!ret && signal_pending(current))
2194  		ret = -EINTR;
2195  	return ret;
2196  }
2197  
2198  #endif
2199  
2200  #ifdef CONFIG_COMPAT
2201  
2202  struct __compat_aio_sigset {
2203  	compat_uptr_t		sigmask;
2204  	compat_size_t		sigsetsize;
2205  };
2206  
2207  #if defined(CONFIG_COMPAT_32BIT_TIME)
2208  
2209  COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2210  		compat_aio_context_t, ctx_id,
2211  		compat_long_t, min_nr,
2212  		compat_long_t, nr,
2213  		struct io_event __user *, events,
2214  		struct old_timespec32 __user *, timeout,
2215  		const struct __compat_aio_sigset __user *, usig)
2216  {
2217  	struct __compat_aio_sigset ksig = { 0, };
2218  	struct timespec64 t;
2219  	bool interrupted;
2220  	int ret;
2221  
2222  	if (timeout && get_old_timespec32(&t, timeout))
2223  		return -EFAULT;
2224  
2225  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2226  		return -EFAULT;
2227  
2228  	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2229  	if (ret)
2230  		return ret;
2231  
2232  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2233  
2234  	interrupted = signal_pending(current);
2235  	restore_saved_sigmask_unless(interrupted);
2236  	if (interrupted && !ret)
2237  		ret = -ERESTARTNOHAND;
2238  
2239  	return ret;
2240  }
2241  
2242  #endif
2243  
2244  COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2245  		compat_aio_context_t, ctx_id,
2246  		compat_long_t, min_nr,
2247  		compat_long_t, nr,
2248  		struct io_event __user *, events,
2249  		struct __kernel_timespec __user *, timeout,
2250  		const struct __compat_aio_sigset __user *, usig)
2251  {
2252  	struct __compat_aio_sigset ksig = { 0, };
2253  	struct timespec64 t;
2254  	bool interrupted;
2255  	int ret;
2256  
2257  	if (timeout && get_timespec64(&t, timeout))
2258  		return -EFAULT;
2259  
2260  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2261  		return -EFAULT;
2262  
2263  	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2264  	if (ret)
2265  		return ret;
2266  
2267  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2268  
2269  	interrupted = signal_pending(current);
2270  	restore_saved_sigmask_unless(interrupted);
2271  	if (interrupted && !ret)
2272  		ret = -ERESTARTNOHAND;
2273  
2274  	return ret;
2275  }
2276  #endif
2277