xref: /openbmc/linux/fs/aio.c (revision efb339a83368ab25de1a18c0fdff85e01c13a1ea)
1  /*
2   *	An async IO implementation for Linux
3   *	Written by Benjamin LaHaise <bcrl@kvack.org>
4   *
5   *	Implements an efficient asynchronous io interface.
6   *
7   *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
8   *	Copyright 2018 Christoph Hellwig.
9   *
10   *	See ../COPYING for licensing terms.
11   */
12  #define pr_fmt(fmt) "%s: " fmt, __func__
13  
14  #include <linux/kernel.h>
15  #include <linux/init.h>
16  #include <linux/errno.h>
17  #include <linux/time.h>
18  #include <linux/aio_abi.h>
19  #include <linux/export.h>
20  #include <linux/syscalls.h>
21  #include <linux/backing-dev.h>
22  #include <linux/refcount.h>
23  #include <linux/uio.h>
24  
25  #include <linux/sched/signal.h>
26  #include <linux/fs.h>
27  #include <linux/file.h>
28  #include <linux/mm.h>
29  #include <linux/mman.h>
30  #include <linux/percpu.h>
31  #include <linux/slab.h>
32  #include <linux/timer.h>
33  #include <linux/aio.h>
34  #include <linux/highmem.h>
35  #include <linux/workqueue.h>
36  #include <linux/security.h>
37  #include <linux/eventfd.h>
38  #include <linux/blkdev.h>
39  #include <linux/compat.h>
40  #include <linux/migrate.h>
41  #include <linux/ramfs.h>
42  #include <linux/percpu-refcount.h>
43  #include <linux/mount.h>
44  #include <linux/pseudo_fs.h>
45  
46  #include <linux/uaccess.h>
47  #include <linux/nospec.h>
48  
49  #include "internal.h"
50  
51  #define KIOCB_KEY		0
52  
53  #define AIO_RING_MAGIC			0xa10a10a1
54  #define AIO_RING_COMPAT_FEATURES	1
55  #define AIO_RING_INCOMPAT_FEATURES	0
56  struct aio_ring {
57  	unsigned	id;	/* kernel internal index number */
58  	unsigned	nr;	/* number of io_events */
59  	unsigned	head;	/* Written to by userland or under ring_lock
60  				 * mutex by aio_read_events_ring(). */
61  	unsigned	tail;
62  
63  	unsigned	magic;
64  	unsigned	compat_features;
65  	unsigned	incompat_features;
66  	unsigned	header_length;	/* size of aio_ring */
67  
68  
69  	struct io_event		io_events[];
70  }; /* 128 bytes + ring size */
71  
72  /*
73   * Plugging is meant to work with larger batches of IOs. If we don't
74   * have more than the below, then don't bother setting up a plug.
75   */
76  #define AIO_PLUG_THRESHOLD	2
77  
78  #define AIO_RING_PAGES	8
79  
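/*
 * Per-mm table mapping a ring's 'id' (also stored in the mapped aio_ring
 * header) back to its kioctx.  The table and its entries are RCU managed;
 * see ioctx_add_table() and lookup_ioctx().
 */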
80  struct kioctx_table {
81  	struct rcu_head		rcu;
82  	unsigned		nr;
83  	struct kioctx __rcu	*table[];
84  };
85  
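/* Per-cpu cache of free completion-ring slots, batched to/from ctx->reqs_available. */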
86  struct kioctx_cpu {
87  	unsigned		reqs_available;
88  };
89  
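/*
 * Used by kill_ioctx() callers (io_destroy() and exit_aio()) to wait until
 * all in-flight requests on the dying context(s) have completed.
 */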
90  struct ctx_rq_wait {
91  	struct completion comp;
92  	atomic_t count;
93  };
94  
95  struct kioctx {
96  	struct percpu_ref	users;
97  	atomic_t		dead;
98  
99  	struct percpu_ref	reqs;
100  
101  	unsigned long		user_id;
102  
103  	struct __percpu kioctx_cpu *cpu;
104  
105  	/*
106  	 * For percpu reqs_available, number of slots we move to/from global
107  	 * counter at a time:
108  	 */
109  	unsigned		req_batch;
110  	/*
111  	 * This is what userspace passed to io_setup(); it's not used for
112  	 * anything but counting against the global max_reqs quota.
113  	 *
114  	 * The real limit is nr_events - 1, which will be larger (see
115  	 * aio_setup_ring())
116  	 */
117  	unsigned		max_reqs;
118  
119  	/* Size of ringbuffer, in units of struct io_event */
120  	unsigned		nr_events;
121  
122  	unsigned long		mmap_base;
123  	unsigned long		mmap_size;
124  
125  	struct page		**ring_pages;
126  	long			nr_pages;
127  
128  	struct rcu_work		free_rwork;	/* see free_ioctx() */
129  
130  	/*
131  	 * signals when all in-flight requests are done
132  	 */
133  	struct ctx_rq_wait	*rq_wait;
134  
135  	struct {
136  		/*
137  		 * This counts the number of available slots in the ringbuffer,
138  		 * so we avoid overflowing it: it's decremented (if positive)
139  		 * when allocating a kiocb and incremented when the resulting
140  		 * io_event is pulled off the ringbuffer.
141  		 *
142  		 * We batch accesses to it with a percpu version.
143  		 */
144  		atomic_t	reqs_available;
145  	} ____cacheline_aligned_in_smp;
146  
147  	struct {
148  		spinlock_t	ctx_lock;
149  		struct list_head active_reqs;	/* used for cancellation */
150  	} ____cacheline_aligned_in_smp;
151  
152  	struct {
153  		struct mutex	ring_lock;
154  		wait_queue_head_t wait;
155  	} ____cacheline_aligned_in_smp;
156  
157  	struct {
158  		unsigned	tail;
159  		unsigned	completed_events;
160  		spinlock_t	completion_lock;
161  	} ____cacheline_aligned_in_smp;
162  
163  	struct page		*internal_pages[AIO_RING_PAGES];
164  	struct file		*aio_ring_file;
165  
166  	unsigned		id;
167  };
168  
169  /*
170   * First field must be the file pointer in all the
171   * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172   */
173  struct fsync_iocb {
174  	struct file		*file;
175  	struct work_struct	work;
176  	bool			datasync;
177  	struct cred		*creds;
178  };
179  
180  struct poll_iocb {
181  	struct file		*file;
182  	struct wait_queue_head	*head;
183  	__poll_t		events;
184  	bool			cancelled;
185  	bool			work_scheduled;
186  	bool			work_need_resched;
187  	struct wait_queue_entry	wait;
188  	struct work_struct	work;
189  };
190  
191  /*
192   * NOTE! Each of the iocb union members has the file pointer
193   * as the first entry in their struct definition. So you can
194   * access the file pointer through any of the sub-structs,
195   * or directly as just 'ki_filp' in this struct.
196   */
197  struct aio_kiocb {
198  	union {
199  		struct file		*ki_filp;
200  		struct kiocb		rw;
201  		struct fsync_iocb	fsync;
202  		struct poll_iocb	poll;
203  	};
204  
205  	struct kioctx		*ki_ctx;
206  	kiocb_cancel_fn		*ki_cancel;
207  
208  	struct io_event		ki_res;
209  
210  	struct list_head	ki_list;	/* the aio core uses this
211  						 * for cancellation */
212  	refcount_t		ki_refcnt;
213  
214  	/*
215  	 * If the aio_resfd field of the userspace iocb is not zero,
216  	 * this is the underlying eventfd context to deliver events to.
217  	 */
218  	struct eventfd_ctx	*ki_eventfd;
219  };
220  
221  /*------ sysctl variables----*/
222  static DEFINE_SPINLOCK(aio_nr_lock);
223  static unsigned long aio_nr;		/* current system wide number of aio requests */
224  static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
225  /*----end sysctl variables---*/
226  #ifdef CONFIG_SYSCTL
227  static struct ctl_table aio_sysctls[] = {
228  	{
229  		.procname	= "aio-nr",
230  		.data		= &aio_nr,
231  		.maxlen		= sizeof(aio_nr),
232  		.mode		= 0444,
233  		.proc_handler	= proc_doulongvec_minmax,
234  	},
235  	{
236  		.procname	= "aio-max-nr",
237  		.data		= &aio_max_nr,
238  		.maxlen		= sizeof(aio_max_nr),
239  		.mode		= 0644,
240  		.proc_handler	= proc_doulongvec_minmax,
241  	},
242  	{}
243  };
244  
245  static void __init aio_sysctl_init(void)
246  {
247  	register_sysctl_init("fs", aio_sysctls);
248  }
249  #else
250  #define aio_sysctl_init() do { } while (0)
251  #endif
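/*
 * These are exposed as fs.aio-nr (read-only) and fs.aio-max-nr (writable),
 * e.g. via /proc/sys/fs/aio-max-nr; ioctx_alloc() charges each context's
 * max_reqs against aio_max_nr and aio_nr_sub() returns the charge.
 */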
252  
253  static struct kmem_cache	*kiocb_cachep;
254  static struct kmem_cache	*kioctx_cachep;
255  
256  static struct vfsmount *aio_mnt;
257  
258  static const struct file_operations aio_ring_fops;
259  static const struct address_space_operations aio_ctx_aops;
260  
261  static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
262  {
263  	struct file *file;
264  	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
265  	if (IS_ERR(inode))
266  		return ERR_CAST(inode);
267  
268  	inode->i_mapping->a_ops = &aio_ctx_aops;
269  	inode->i_mapping->private_data = ctx;
270  	inode->i_size = PAGE_SIZE * nr_pages;
271  
272  	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
273  				O_RDWR, &aio_ring_fops);
274  	if (IS_ERR(file))
275  		iput(inode);
276  	return file;
277  }
278  
279  static int aio_init_fs_context(struct fs_context *fc)
280  {
281  	if (!init_pseudo(fc, AIO_RING_MAGIC))
282  		return -ENOMEM;
283  	fc->s_iflags |= SB_I_NOEXEC;
284  	return 0;
285  }
286  
287  /* aio_setup
288   *	Creates the slab caches used by the aio routines, panicking on
289   *	failure as this is done early during the boot sequence.
290   */
291  static int __init aio_setup(void)
292  {
293  	static struct file_system_type aio_fs = {
294  		.name		= "aio",
295  		.init_fs_context = aio_init_fs_context,
296  		.kill_sb	= kill_anon_super,
297  	};
298  	aio_mnt = kern_mount(&aio_fs);
299  	if (IS_ERR(aio_mnt))
300  		panic("Failed to create aio fs mount.");
301  
302  	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
303  	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
304  	aio_sysctl_init();
305  	return 0;
306  }
307  __initcall(aio_setup);
308  
309  static void put_aio_ring_file(struct kioctx *ctx)
310  {
311  	struct file *aio_ring_file = ctx->aio_ring_file;
312  	struct address_space *i_mapping;
313  
314  	if (aio_ring_file) {
315  		truncate_setsize(file_inode(aio_ring_file), 0);
316  
317  		/* Prevent further access to the kioctx from migratepages */
318  		i_mapping = aio_ring_file->f_mapping;
319  		spin_lock(&i_mapping->private_lock);
320  		i_mapping->private_data = NULL;
321  		ctx->aio_ring_file = NULL;
322  		spin_unlock(&i_mapping->private_lock);
323  
324  		fput(aio_ring_file);
325  	}
326  }
327  
328  static void aio_free_ring(struct kioctx *ctx)
329  {
330  	int i;
331  
332  	/* Disconnect the kioctx from the ring file.  This prevents future
333  	 * accesses to the kioctx from page migration.
334  	 */
335  	put_aio_ring_file(ctx);
336  
337  	for (i = 0; i < ctx->nr_pages; i++) {
338  		struct page *page;
339  		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
340  				page_count(ctx->ring_pages[i]));
341  		page = ctx->ring_pages[i];
342  		if (!page)
343  			continue;
344  		ctx->ring_pages[i] = NULL;
345  		put_page(page);
346  	}
347  
348  	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
349  		kfree(ctx->ring_pages);
350  		ctx->ring_pages = NULL;
351  	}
352  }
353  
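/*
 * The ring mapping's address doubles as the aio_context_t handle, so when
 * userspace mremap()s the mapping we must follow along and update
 * user_id/mmap_base, or lookup_ioctx() would stop finding the context.
 */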
354  static int aio_ring_mremap(struct vm_area_struct *vma)
355  {
356  	struct file *file = vma->vm_file;
357  	struct mm_struct *mm = vma->vm_mm;
358  	struct kioctx_table *table;
359  	int i, res = -EINVAL;
360  
361  	spin_lock(&mm->ioctx_lock);
362  	rcu_read_lock();
363  	table = rcu_dereference(mm->ioctx_table);
364  	if (!table)
365  		goto out_unlock;
366  
367  	for (i = 0; i < table->nr; i++) {
368  		struct kioctx *ctx;
369  
370  		ctx = rcu_dereference(table->table[i]);
371  		if (ctx && ctx->aio_ring_file == file) {
372  			if (!atomic_read(&ctx->dead)) {
373  				ctx->user_id = ctx->mmap_base = vma->vm_start;
374  				res = 0;
375  			}
376  			break;
377  		}
378  	}
379  
380  out_unlock:
381  	rcu_read_unlock();
382  	spin_unlock(&mm->ioctx_lock);
383  	return res;
384  }
385  
386  static const struct vm_operations_struct aio_ring_vm_ops = {
387  	.mremap		= aio_ring_mremap,
388  #if IS_ENABLED(CONFIG_MMU)
389  	.fault		= filemap_fault,
390  	.map_pages	= filemap_map_pages,
391  	.page_mkwrite	= filemap_page_mkwrite,
392  #endif
393  };
394  
395  static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
396  {
397  	vm_flags_set(vma, VM_DONTEXPAND);
398  	vma->vm_ops = &aio_ring_vm_ops;
399  	return 0;
400  }
401  
402  static const struct file_operations aio_ring_fops = {
403  	.mmap = aio_ring_mmap,
404  };
405  
406  #if IS_ENABLED(CONFIG_MIGRATION)
407  static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
408  			struct folio *src, enum migrate_mode mode)
409  {
410  	struct kioctx *ctx;
411  	unsigned long flags;
412  	pgoff_t idx;
413  	int rc;
414  
415  	/*
416  	 * We cannot support the _NO_COPY case here, because copy needs to
417  	 * happen under the ctx->completion_lock. That does not work with the
418  	 * migration workflow of MIGRATE_SYNC_NO_COPY.
419  	 */
420  	if (mode == MIGRATE_SYNC_NO_COPY)
421  		return -EINVAL;
422  
423  	rc = 0;
424  
425  	/* mapping->private_lock here protects against the kioctx teardown.  */
426  	spin_lock(&mapping->private_lock);
427  	ctx = mapping->private_data;
428  	if (!ctx) {
429  		rc = -EINVAL;
430  		goto out;
431  	}
432  
433  	/* The ring_lock mutex.  This prevents aio_read_events() from writing
434  	 * to the ring's head, and prevents page migration from mucking in
435  	 * a partially initialized kioctx.
436  	 */
437  	if (!mutex_trylock(&ctx->ring_lock)) {
438  		rc = -EAGAIN;
439  		goto out;
440  	}
441  
442  	idx = src->index;
443  	if (idx < (pgoff_t)ctx->nr_pages) {
444  		/* Make sure the old folio hasn't already been changed */
445  		if (ctx->ring_pages[idx] != &src->page)
446  			rc = -EAGAIN;
447  	} else
448  		rc = -EINVAL;
449  
450  	if (rc != 0)
451  		goto out_unlock;
452  
453  	/* Writeback must be complete */
454  	BUG_ON(folio_test_writeback(src));
455  	folio_get(dst);
456  
457  	rc = folio_migrate_mapping(mapping, dst, src, 1);
458  	if (rc != MIGRATEPAGE_SUCCESS) {
459  		folio_put(dst);
460  		goto out_unlock;
461  	}
462  
463  	/* Take completion_lock to prevent other writes to the ring buffer
464  	 * while the old folio is copied to the new.  This prevents new
465  	 * events from being lost.
466  	 */
467  	spin_lock_irqsave(&ctx->completion_lock, flags);
468  	folio_migrate_copy(dst, src);
469  	BUG_ON(ctx->ring_pages[idx] != &src->page);
470  	ctx->ring_pages[idx] = &dst->page;
471  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
472  
473  	/* The old folio is no longer accessible. */
474  	folio_put(src);
475  
476  out_unlock:
477  	mutex_unlock(&ctx->ring_lock);
478  out:
479  	spin_unlock(&mapping->private_lock);
480  	return rc;
481  }
482  #else
483  #define aio_migrate_folio NULL
484  #endif
485  
486  static const struct address_space_operations aio_ctx_aops = {
487  	.dirty_folio	= noop_dirty_folio,
488  	.migrate_folio	= aio_migrate_folio,
489  };
490  
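/*
 * Build the completion ring: allocate page-cache pages from a private
 * "[aio]" file (so they remain migratable), map them into the task's
 * address space with do_mmap(), and initialize the aio_ring header.  The
 * mapping's address becomes the context handle returned to userspace.
 */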
491  static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
492  {
493  	struct aio_ring *ring;
494  	struct mm_struct *mm = current->mm;
495  	unsigned long size, unused;
496  	int nr_pages;
497  	int i;
498  	struct file *file;
499  
500  	/* Compensate for the ring buffer's head/tail overlap entry */
501  	nr_events += 2;	/* 1 is required, 2 for good luck */
502  
503  	size = sizeof(struct aio_ring);
504  	size += sizeof(struct io_event) * nr_events;
505  
506  	nr_pages = PFN_UP(size);
507  	if (nr_pages < 0)
508  		return -EINVAL;
509  
510  	file = aio_private_file(ctx, nr_pages);
511  	if (IS_ERR(file)) {
512  		ctx->aio_ring_file = NULL;
513  		return -ENOMEM;
514  	}
515  
516  	ctx->aio_ring_file = file;
517  	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
518  			/ sizeof(struct io_event);
519  
520  	ctx->ring_pages = ctx->internal_pages;
521  	if (nr_pages > AIO_RING_PAGES) {
522  		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
523  					  GFP_KERNEL);
524  		if (!ctx->ring_pages) {
525  			put_aio_ring_file(ctx);
526  			return -ENOMEM;
527  		}
528  	}
529  
530  	for (i = 0; i < nr_pages; i++) {
531  		struct page *page;
532  		page = find_or_create_page(file->f_mapping,
533  					   i, GFP_HIGHUSER | __GFP_ZERO);
534  		if (!page)
535  			break;
536  		pr_debug("pid(%d) page[%d]->count=%d\n",
537  			 current->pid, i, page_count(page));
538  		SetPageUptodate(page);
539  		unlock_page(page);
540  
541  		ctx->ring_pages[i] = page;
542  	}
543  	ctx->nr_pages = i;
544  
545  	if (unlikely(i != nr_pages)) {
546  		aio_free_ring(ctx);
547  		return -ENOMEM;
548  	}
549  
550  	ctx->mmap_size = nr_pages * PAGE_SIZE;
551  	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
552  
553  	if (mmap_write_lock_killable(mm)) {
554  		ctx->mmap_size = 0;
555  		aio_free_ring(ctx);
556  		return -EINTR;
557  	}
558  
559  	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
560  				 PROT_READ | PROT_WRITE,
561  				 MAP_SHARED, 0, &unused, NULL);
562  	mmap_write_unlock(mm);
563  	if (IS_ERR((void *)ctx->mmap_base)) {
564  		ctx->mmap_size = 0;
565  		aio_free_ring(ctx);
566  		return -ENOMEM;
567  	}
568  
569  	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
570  
571  	ctx->user_id = ctx->mmap_base;
572  	ctx->nr_events = nr_events; /* trusted copy */
573  
574  	ring = kmap_atomic(ctx->ring_pages[0]);
575  	ring->nr = nr_events;	/* user copy */
576  	ring->id = ~0U;
577  	ring->head = ring->tail = 0;
578  	ring->magic = AIO_RING_MAGIC;
579  	ring->compat_features = AIO_RING_COMPAT_FEATURES;
580  	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
581  	ring->header_length = sizeof(struct aio_ring);
582  	kunmap_atomic(ring);
583  	flush_dcache_page(ctx->ring_pages[0]);
584  
585  	return 0;
586  }
587  
588  #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
589  #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
590  #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
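/*
 * For example (assuming 4 KiB pages and the usual 32-byte struct io_event):
 * AIO_EVENTS_PER_PAGE is 128 and the first page loses one slot to the
 * aio_ring header, so AIO_EVENTS_OFFSET shifts event index N to ring
 * slot N + AIO_EVENTS_OFFSET when locating its page.
 */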
591  
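/*
 * Exported so that drivers completing kiocbs asynchronously (e.g. the USB
 * gadget functionfs code) can register a cancellation callback; the request
 * is parked on ctx->active_reqs where io_cancel() and free_ioctx_users()
 * can find it.
 */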
592  void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
593  {
594  	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
595  	struct kioctx *ctx = req->ki_ctx;
596  	unsigned long flags;
597  
598  	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
599  		return;
600  
601  	spin_lock_irqsave(&ctx->ctx_lock, flags);
602  	list_add_tail(&req->ki_list, &ctx->active_reqs);
603  	req->ki_cancel = cancel;
604  	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
605  }
606  EXPORT_SYMBOL(kiocb_set_cancel_fn);
607  
608  /*
609   * free_ioctx() should be RCU delayed to synchronize against the RCU
610   * protected lookup_ioctx() and also needs process context to call
611   * aio_free_ring().  Use rcu_work.
612   */
613  static void free_ioctx(struct work_struct *work)
614  {
615  	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
616  					  free_rwork);
617  	pr_debug("freeing %p\n", ctx);
618  
619  	aio_free_ring(ctx);
620  	free_percpu(ctx->cpu);
621  	percpu_ref_exit(&ctx->reqs);
622  	percpu_ref_exit(&ctx->users);
623  	kmem_cache_free(kioctx_cachep, ctx);
624  }
625  
626  static void free_ioctx_reqs(struct percpu_ref *ref)
627  {
628  	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
629  
630  	/* At this point we know that there are no in-flight requests */
631  	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
632  		complete(&ctx->rq_wait->comp);
633  
634  	/* Synchronize against RCU protected table->table[] dereferences */
635  	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
636  	queue_rcu_work(system_wq, &ctx->free_rwork);
637  }
638  
639  /*
640   * When this function runs, the kioctx has been removed from the "hash table"
641   * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
642   * now it's safe to cancel any that need to be.
643   */
644  static void free_ioctx_users(struct percpu_ref *ref)
645  {
646  	struct kioctx *ctx = container_of(ref, struct kioctx, users);
647  	struct aio_kiocb *req;
648  
649  	spin_lock_irq(&ctx->ctx_lock);
650  
651  	while (!list_empty(&ctx->active_reqs)) {
652  		req = list_first_entry(&ctx->active_reqs,
653  				       struct aio_kiocb, ki_list);
654  		req->ki_cancel(&req->rw);
655  		list_del_init(&req->ki_list);
656  	}
657  
658  	spin_unlock_irq(&ctx->ctx_lock);
659  
660  	percpu_ref_kill(&ctx->reqs);
661  	percpu_ref_put(&ctx->reqs);
662  }
663  
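/*
 * Find a free slot in mm->ioctx_table for @ctx (growing the table to four
 * times its size when full), publish the context there, and write the slot
 * index into the ring header so lookup_ioctx() can validate the handle
 * userspace passes back.
 */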
664  static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
665  {
666  	unsigned i, new_nr;
667  	struct kioctx_table *table, *old;
668  	struct aio_ring *ring;
669  
670  	spin_lock(&mm->ioctx_lock);
671  	table = rcu_dereference_raw(mm->ioctx_table);
672  
673  	while (1) {
674  		if (table)
675  			for (i = 0; i < table->nr; i++)
676  				if (!rcu_access_pointer(table->table[i])) {
677  					ctx->id = i;
678  					rcu_assign_pointer(table->table[i], ctx);
679  					spin_unlock(&mm->ioctx_lock);
680  
681  					/* While kioctx setup is in progress,
682  					 * we are protected from page migration
683  					 * changing ring_pages by ->ring_lock.
684  					 */
685  					ring = kmap_atomic(ctx->ring_pages[0]);
686  					ring->id = ctx->id;
687  					kunmap_atomic(ring);
688  					return 0;
689  				}
690  
691  		new_nr = (table ? table->nr : 1) * 4;
692  		spin_unlock(&mm->ioctx_lock);
693  
694  		table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
695  		if (!table)
696  			return -ENOMEM;
697  
698  		table->nr = new_nr;
699  
700  		spin_lock(&mm->ioctx_lock);
701  		old = rcu_dereference_raw(mm->ioctx_table);
702  
703  		if (!old) {
704  			rcu_assign_pointer(mm->ioctx_table, table);
705  		} else if (table->nr > old->nr) {
706  			memcpy(table->table, old->table,
707  			       old->nr * sizeof(struct kioctx *));
708  
709  			rcu_assign_pointer(mm->ioctx_table, table);
710  			kfree_rcu(old, rcu);
711  		} else {
712  			kfree(table);
713  			table = old;
714  		}
715  	}
716  }
717  
718  static void aio_nr_sub(unsigned nr)
719  {
720  	spin_lock(&aio_nr_lock);
721  	if (WARN_ON(aio_nr - nr > aio_nr))
722  		aio_nr = 0;
723  	else
724  		aio_nr -= nr;
725  	spin_unlock(&aio_nr_lock);
726  }
727  
728  /* ioctx_alloc
729   *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
730   */
731  static struct kioctx *ioctx_alloc(unsigned nr_events)
732  {
733  	struct mm_struct *mm = current->mm;
734  	struct kioctx *ctx;
735  	int err = -ENOMEM;
736  
737  	/*
738  	 * Store the original nr_events -- what userspace passed to io_setup(),
739  	 * for counting against the global limit -- before it changes.
740  	 */
741  	unsigned int max_reqs = nr_events;
742  
743  	/*
744  	 * We keep track of the number of available ringbuffer slots, to prevent
745  	 * overflow (reqs_available), and we also use percpu counters for this.
746  	 *
747  	 * So since up to half the slots might be on other CPUs' percpu counters
748  	 * and unavailable, double nr_events so userspace sees what they
749  	 * expected: additionally, we move req_batch slots to/from percpu
750  	 * counters at a time, so make sure that isn't 0:
751  	 */
752  	nr_events = max(nr_events, num_possible_cpus() * 4);
753  	nr_events *= 2;
754  
755  	/* Prevent overflows */
756  	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
757  		pr_debug("ENOMEM: nr_events too high\n");
758  		return ERR_PTR(-EINVAL);
759  	}
760  
761  	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
762  		return ERR_PTR(-EAGAIN);
763  
764  	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
765  	if (!ctx)
766  		return ERR_PTR(-ENOMEM);
767  
768  	ctx->max_reqs = max_reqs;
769  
770  	spin_lock_init(&ctx->ctx_lock);
771  	spin_lock_init(&ctx->completion_lock);
772  	mutex_init(&ctx->ring_lock);
773  	/* Protect against page migration throughout kioctx setup by keeping
774  	 * the ring_lock mutex held until setup is complete. */
775  	mutex_lock(&ctx->ring_lock);
776  	init_waitqueue_head(&ctx->wait);
777  
778  	INIT_LIST_HEAD(&ctx->active_reqs);
779  
780  	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
781  		goto err;
782  
783  	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
784  		goto err;
785  
786  	ctx->cpu = alloc_percpu(struct kioctx_cpu);
787  	if (!ctx->cpu)
788  		goto err;
789  
790  	err = aio_setup_ring(ctx, nr_events);
791  	if (err < 0)
792  		goto err;
793  
794  	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
795  	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
796  	if (ctx->req_batch < 1)
797  		ctx->req_batch = 1;
798  
799  	/* limit the number of system wide aios */
800  	spin_lock(&aio_nr_lock);
801  	if (aio_nr + ctx->max_reqs > aio_max_nr ||
802  	    aio_nr + ctx->max_reqs < aio_nr) {
803  		spin_unlock(&aio_nr_lock);
804  		err = -EAGAIN;
805  		goto err_ctx;
806  	}
807  	aio_nr += ctx->max_reqs;
808  	spin_unlock(&aio_nr_lock);
809  
810  	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
811  	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
812  
813  	err = ioctx_add_table(ctx, mm);
814  	if (err)
815  		goto err_cleanup;
816  
817  	/* Release the ring_lock mutex now that all setup is complete. */
818  	mutex_unlock(&ctx->ring_lock);
819  
820  	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
821  		 ctx, ctx->user_id, mm, ctx->nr_events);
822  	return ctx;
823  
824  err_cleanup:
825  	aio_nr_sub(ctx->max_reqs);
826  err_ctx:
827  	atomic_set(&ctx->dead, 1);
828  	if (ctx->mmap_size)
829  		vm_munmap(ctx->mmap_base, ctx->mmap_size);
830  	aio_free_ring(ctx);
831  err:
832  	mutex_unlock(&ctx->ring_lock);
833  	free_percpu(ctx->cpu);
834  	percpu_ref_exit(&ctx->reqs);
835  	percpu_ref_exit(&ctx->users);
836  	kmem_cache_free(kioctx_cachep, ctx);
837  	pr_debug("error allocating ioctx %d\n", err);
838  	return ERR_PTR(err);
839  }
840  
841  /* kill_ioctx
842   *	Cancels all outstanding aio requests on an aio context.  Used
843   *	when the processes owning a context have all exited to encourage
844   *	the rapid destruction of the kioctx.
845   */
846  static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
847  		      struct ctx_rq_wait *wait)
848  {
849  	struct kioctx_table *table;
850  
851  	spin_lock(&mm->ioctx_lock);
852  	if (atomic_xchg(&ctx->dead, 1)) {
853  		spin_unlock(&mm->ioctx_lock);
854  		return -EINVAL;
855  	}
856  
857  	table = rcu_dereference_raw(mm->ioctx_table);
858  	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
859  	RCU_INIT_POINTER(table->table[ctx->id], NULL);
860  	spin_unlock(&mm->ioctx_lock);
861  
862  	/* free_ioctx_reqs() will do the necessary RCU synchronization */
863  	wake_up_all(&ctx->wait);
864  
865  	/*
866  	 * It'd be more correct to do this in free_ioctx(), after all
867  	 * the outstanding kiocbs have finished - but by then io_destroy
868  	 * has already returned, so io_setup() could potentially return
869  	 * -EAGAIN with no ioctxs actually in use (as far as userspace
870  	 *  could tell).
871  	 */
872  	aio_nr_sub(ctx->max_reqs);
873  
874  	if (ctx->mmap_size)
875  		vm_munmap(ctx->mmap_base, ctx->mmap_size);
876  
877  	ctx->rq_wait = wait;
878  	percpu_ref_kill(&ctx->users);
879  	return 0;
880  }
881  
882  /*
883   * exit_aio: called when the last user of mm goes away.  At this point, there is
884   * no way for any new requests to be submitted or any of the io_* syscalls to be
885   * called on the context.
886   *
887   * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
888   * them.
889   */
890  void exit_aio(struct mm_struct *mm)
891  {
892  	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
893  	struct ctx_rq_wait wait;
894  	int i, skipped;
895  
896  	if (!table)
897  		return;
898  
899  	atomic_set(&wait.count, table->nr);
900  	init_completion(&wait.comp);
901  
902  	skipped = 0;
903  	for (i = 0; i < table->nr; ++i) {
904  		struct kioctx *ctx =
905  			rcu_dereference_protected(table->table[i], true);
906  
907  		if (!ctx) {
908  			skipped++;
909  			continue;
910  		}
911  
912  		/*
913  		 * We don't need to bother with munmap() here - exit_mmap(mm)
914  		 * is coming and it'll unmap everything. And we simply can't,
915  		 * this is not necessarily our ->mm.
916  		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
917  		 * that it needs to unmap the area, just set it to 0.
918  		 */
919  		ctx->mmap_size = 0;
920  		kill_ioctx(mm, ctx, &wait);
921  	}
922  
923  	if (!atomic_sub_and_test(skipped, &wait.count)) {
924  		/* Wait until all IO for the context is done. */
925  		wait_for_completion(&wait.comp);
926  	}
927  
928  	RCU_INIT_POINTER(mm->ioctx_table, NULL);
929  	kfree(table);
930  }
931  
932  static void put_reqs_available(struct kioctx *ctx, unsigned nr)
933  {
934  	struct kioctx_cpu *kcpu;
935  	unsigned long flags;
936  
937  	local_irq_save(flags);
938  	kcpu = this_cpu_ptr(ctx->cpu);
939  	kcpu->reqs_available += nr;
940  
941  	while (kcpu->reqs_available >= ctx->req_batch * 2) {
942  		kcpu->reqs_available -= ctx->req_batch;
943  		atomic_add(ctx->req_batch, &ctx->reqs_available);
944  	}
945  
946  	local_irq_restore(flags);
947  }
948  
949  static bool __get_reqs_available(struct kioctx *ctx)
950  {
951  	struct kioctx_cpu *kcpu;
952  	bool ret = false;
953  	unsigned long flags;
954  
955  	local_irq_save(flags);
956  	kcpu = this_cpu_ptr(ctx->cpu);
957  	if (!kcpu->reqs_available) {
958  		int avail = atomic_read(&ctx->reqs_available);
959  
960  		do {
961  			if (avail < ctx->req_batch)
962  				goto out;
963  		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
964  					     &avail, avail - ctx->req_batch));
965  
966  		kcpu->reqs_available += ctx->req_batch;
967  	}
968  
969  	ret = true;
970  	kcpu->reqs_available--;
971  out:
972  	local_irq_restore(flags);
973  	return ret;
974  }
975  
976  /* refill_reqs_available
977   *	Updates the reqs_available reference counts used for tracking the
978   *	number of free slots in the completion ring.  This can be called
979   *	from aio_complete() (to optimistically update reqs_available) or
980   *	from aio_get_req() (the "we're out of events" case).  It must be
981   *	called holding ctx->completion_lock.
982   */
983  static void refill_reqs_available(struct kioctx *ctx, unsigned head,
984                                    unsigned tail)
985  {
986  	unsigned events_in_ring, completed;
987  
988  	/* Clamp head since userland can write to it. */
989  	head %= ctx->nr_events;
990  	if (head <= tail)
991  		events_in_ring = tail - head;
992  	else
993  		events_in_ring = ctx->nr_events - (head - tail);
994  
995  	completed = ctx->completed_events;
996  	if (events_in_ring < completed)
997  		completed -= events_in_ring;
998  	else
999  		completed = 0;
1000  
1001  	if (!completed)
1002  		return;
1003  
1004  	ctx->completed_events -= completed;
1005  	put_reqs_available(ctx, completed);
1006  }
1007  
1008  /* user_refill_reqs_available
1009   *	Called to refill reqs_available when aio_get_req() runs out of
1010   *	space in the completion ring.
1011   */
1012  static void user_refill_reqs_available(struct kioctx *ctx)
1013  {
1014  	spin_lock_irq(&ctx->completion_lock);
1015  	if (ctx->completed_events) {
1016  		struct aio_ring *ring;
1017  		unsigned head;
1018  
1019  		/* Access of ring->head may race with aio_read_events_ring()
1020  		 * here, but that's okay: whether we read the old version
1021  		 * or the new version, either will be valid.  The important
1022  		 * part is that head cannot pass tail since we prevent
1023  		 * aio_complete() from updating tail by holding
1024  		 * ctx->completion_lock.  Even if head is invalid, the check
1025  		 * against ctx->completed_events below will make sure we do the
1026  		 * safe/right thing.
1027  		 */
1028  		ring = kmap_atomic(ctx->ring_pages[0]);
1029  		head = ring->head;
1030  		kunmap_atomic(ring);
1031  
1032  		refill_reqs_available(ctx, head, ctx->tail);
1033  	}
1034  
1035  	spin_unlock_irq(&ctx->completion_lock);
1036  }
1037  
1038  static bool get_reqs_available(struct kioctx *ctx)
1039  {
1040  	if (__get_reqs_available(ctx))
1041  		return true;
1042  	user_refill_reqs_available(ctx);
1043  	return __get_reqs_available(ctx);
1044  }
1045  
1046  /* aio_get_req
1047   *	Allocate a slot for an aio request.
1048   * Returns NULL if no requests are free.
1049   *
1050   * The refcount is initialized to 2 - one for the async op completion,
1051   * one for the synchronous code that does this.
1052   */
1053  static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1054  {
1055  	struct aio_kiocb *req;
1056  
1057  	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1058  	if (unlikely(!req))
1059  		return NULL;
1060  
1061  	if (unlikely(!get_reqs_available(ctx))) {
1062  		kmem_cache_free(kiocb_cachep, req);
1063  		return NULL;
1064  	}
1065  
1066  	percpu_ref_get(&ctx->reqs);
1067  	req->ki_ctx = ctx;
1068  	INIT_LIST_HEAD(&req->ki_list);
1069  	refcount_set(&req->ki_refcnt, 2);
1070  	req->ki_eventfd = NULL;
1071  	return req;
1072  }
1073  
1074  static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1075  {
1076  	struct aio_ring __user *ring  = (void __user *)ctx_id;
1077  	struct mm_struct *mm = current->mm;
1078  	struct kioctx *ctx, *ret = NULL;
1079  	struct kioctx_table *table;
1080  	unsigned id;
1081  
1082  	if (get_user(id, &ring->id))
1083  		return NULL;
1084  
1085  	rcu_read_lock();
1086  	table = rcu_dereference(mm->ioctx_table);
1087  
1088  	if (!table || id >= table->nr)
1089  		goto out;
1090  
1091  	id = array_index_nospec(id, table->nr);
1092  	ctx = rcu_dereference(table->table[id]);
1093  	if (ctx && ctx->user_id == ctx_id) {
1094  		if (percpu_ref_tryget_live(&ctx->users))
1095  			ret = ctx;
1096  	}
1097  out:
1098  	rcu_read_unlock();
1099  	return ret;
1100  }
1101  
1102  static inline void iocb_destroy(struct aio_kiocb *iocb)
1103  {
1104  	if (iocb->ki_eventfd)
1105  		eventfd_ctx_put(iocb->ki_eventfd);
1106  	if (iocb->ki_filp)
1107  		fput(iocb->ki_filp);
1108  	percpu_ref_put(&iocb->ki_ctx->reqs);
1109  	kmem_cache_free(kiocb_cachep, iocb);
1110  }
1111  
1112  /* aio_complete
1113   *	Called when the io request on the given iocb is complete.
1114   */
1115  static void aio_complete(struct aio_kiocb *iocb)
1116  {
1117  	struct kioctx	*ctx = iocb->ki_ctx;
1118  	struct aio_ring	*ring;
1119  	struct io_event	*ev_page, *event;
1120  	unsigned tail, pos, head;
1121  	unsigned long	flags;
1122  
1123  	/*
1124  	 * Add a completion event to the ring buffer. Must be done holding
1125  	 * ctx->completion_lock to prevent other code from messing with the tail
1126  	 * pointer since we might be called from irq context.
1127  	 */
1128  	spin_lock_irqsave(&ctx->completion_lock, flags);
1129  
1130  	tail = ctx->tail;
1131  	pos = tail + AIO_EVENTS_OFFSET;
1132  
1133  	if (++tail >= ctx->nr_events)
1134  		tail = 0;
1135  
1136  	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1137  	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1138  
1139  	*event = iocb->ki_res;
1140  
1141  	kunmap_atomic(ev_page);
1142  	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1143  
1144  	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1145  		 (void __user *)(unsigned long)iocb->ki_res.obj,
1146  		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1147  
1148  	/* after flagging the request as done, we
1149  	 * must never even look at it again
1150  	 */
1151  	smp_wmb();	/* make event visible before updating tail */
1152  
1153  	ctx->tail = tail;
1154  
1155  	ring = kmap_atomic(ctx->ring_pages[0]);
1156  	head = ring->head;
1157  	ring->tail = tail;
1158  	kunmap_atomic(ring);
1159  	flush_dcache_page(ctx->ring_pages[0]);
1160  
1161  	ctx->completed_events++;
1162  	if (ctx->completed_events > 1)
1163  		refill_reqs_available(ctx, head, tail);
1164  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1165  
1166  	pr_debug("added to ring %p at [%u]\n", iocb, tail);
1167  
1168  	/*
1169  	 * Check if the user asked us to deliver the result through an
1170  	 * eventfd. The eventfd_signal() function is safe to call
1171  	 * from IRQ context.
1172  	 */
1173  	if (iocb->ki_eventfd)
1174  		eventfd_signal(iocb->ki_eventfd, 1);
1175  
1176  	/*
1177  	 * We have to order our ring_info tail store above and test
1178  	 * of the wait list below outside the wait lock.  This is
1179  	 * like in wake_up_bit() where clearing a bit has to be
1180  	 * ordered with the unlocked test.
1181  	 */
1182  	smp_mb();
1183  
1184  	if (waitqueue_active(&ctx->wait))
1185  		wake_up(&ctx->wait);
1186  }
1187  
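/*
 * Drop one of the two references taken in aio_get_req(); whoever drops the
 * last one posts the completion event and frees the request.
 */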
1188  static inline void iocb_put(struct aio_kiocb *iocb)
1189  {
1190  	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1191  		aio_complete(iocb);
1192  		iocb_destroy(iocb);
1193  	}
1194  }
1195  
1196  /* aio_read_events_ring
1197   *	Pull events off the ioctx's event ring.  Returns the number of
1198   *	events fetched.
1199   */
1200  static long aio_read_events_ring(struct kioctx *ctx,
1201  				 struct io_event __user *event, long nr)
1202  {
1203  	struct aio_ring *ring;
1204  	unsigned head, tail, pos;
1205  	long ret = 0;
1206  	int copy_ret;
1207  
1208  	/*
1209  	 * The mutex can block and wake us up and that will cause
1210  	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1211  	 * and repeat. This should be rare enough that it doesn't cause
1212  	 * performance issues. See the comment in read_events() for more detail.
1213  	 */
1214  	sched_annotate_sleep();
1215  	mutex_lock(&ctx->ring_lock);
1216  
1217  	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
1218  	ring = kmap_atomic(ctx->ring_pages[0]);
1219  	head = ring->head;
1220  	tail = ring->tail;
1221  	kunmap_atomic(ring);
1222  
1223  	/*
1224  	 * Ensure that once we've read the current tail pointer, that
1225  	 * we also see the events that were stored up to the tail.
1226  	 */
1227  	smp_rmb();
1228  
1229  	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1230  
1231  	if (head == tail)
1232  		goto out;
1233  
1234  	head %= ctx->nr_events;
1235  	tail %= ctx->nr_events;
1236  
1237  	while (ret < nr) {
1238  		long avail;
1239  		struct io_event *ev;
1240  		struct page *page;
1241  
1242  		avail = (head <= tail ?  tail : ctx->nr_events) - head;
1243  		if (head == tail)
1244  			break;
1245  
1246  		pos = head + AIO_EVENTS_OFFSET;
1247  		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1248  		pos %= AIO_EVENTS_PER_PAGE;
1249  
1250  		avail = min(avail, nr - ret);
1251  		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1252  
1253  		ev = kmap(page);
1254  		copy_ret = copy_to_user(event + ret, ev + pos,
1255  					sizeof(*ev) * avail);
1256  		kunmap(page);
1257  
1258  		if (unlikely(copy_ret)) {
1259  			ret = -EFAULT;
1260  			goto out;
1261  		}
1262  
1263  		ret += avail;
1264  		head += avail;
1265  		head %= ctx->nr_events;
1266  	}
1267  
1268  	ring = kmap_atomic(ctx->ring_pages[0]);
1269  	ring->head = head;
1270  	kunmap_atomic(ring);
1271  	flush_dcache_page(ctx->ring_pages[0]);
1272  
1273  	pr_debug("%li  h%u t%u\n", ret, head, tail);
1274  out:
1275  	mutex_unlock(&ctx->ring_lock);
1276  
1277  	return ret;
1278  }
1279  
1280  static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1281  			    struct io_event __user *event, long *i)
1282  {
1283  	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1284  
1285  	if (ret > 0)
1286  		*i += ret;
1287  
1288  	if (unlikely(atomic_read(&ctx->dead)))
1289  		ret = -EINVAL;
1290  
1291  	if (!*i)
1292  		*i = ret;
1293  
1294  	return ret < 0 || *i >= min_nr;
1295  }
1296  
1297  static long read_events(struct kioctx *ctx, long min_nr, long nr,
1298  			struct io_event __user *event,
1299  			ktime_t until)
1300  {
1301  	long ret = 0;
1302  
1303  	/*
1304  	 * Note that aio_read_events() is being called as the conditional - i.e.
1305  	 * we're calling it after prepare_to_wait() has set task state to
1306  	 * TASK_INTERRUPTIBLE.
1307  	 *
1308  	 * But aio_read_events() can block, and if it blocks it's going to flip
1309  	 * the task state back to TASK_RUNNING.
1310  	 *
1311  	 * This should be ok, provided it doesn't flip the state back to
1312  	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1313  	 * will only happen if the mutex_lock() call blocks, and we then find
1314  	 * the ringbuffer empty. So in practice we should be ok, but it's
1315  	 * something to be aware of when touching this code.
1316  	 */
1317  	if (until == 0)
1318  		aio_read_events(ctx, min_nr, nr, event, &ret);
1319  	else
1320  		wait_event_interruptible_hrtimeout(ctx->wait,
1321  				aio_read_events(ctx, min_nr, nr, event, &ret),
1322  				until);
1323  	return ret;
1324  }
1325  
1326  /* sys_io_setup:
1327   *	Create an aio_context capable of receiving at least nr_events.
1328   *	ctxp must not point to an aio_context that already exists, and
1329   *	must be initialized to 0 prior to the call.  On successful
1330   *	creation of the aio_context, *ctxp is filled in with the resulting
1331   *	handle.  May fail with -EINVAL if *ctxp is not initialized,
1332   *	or if the specified nr_events exceeds internal limits.  May fail
1333   *	with -EAGAIN if the specified nr_events exceeds the user's limit
1334   *	of available events.  May fail with -ENOMEM if insufficient kernel
1335   *	resources are available.  May fail with -EFAULT if an invalid
1336   *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
1337   *	implemented.
1338   */
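/*
 * A minimal (hypothetical) raw-syscall caller, for illustration only:
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */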
1339  SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1340  {
1341  	struct kioctx *ioctx = NULL;
1342  	unsigned long ctx;
1343  	long ret;
1344  
1345  	ret = get_user(ctx, ctxp);
1346  	if (unlikely(ret))
1347  		goto out;
1348  
1349  	ret = -EINVAL;
1350  	if (unlikely(ctx || nr_events == 0)) {
1351  		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1352  		         ctx, nr_events);
1353  		goto out;
1354  	}
1355  
1356  	ioctx = ioctx_alloc(nr_events);
1357  	ret = PTR_ERR(ioctx);
1358  	if (!IS_ERR(ioctx)) {
1359  		ret = put_user(ioctx->user_id, ctxp);
1360  		if (ret)
1361  			kill_ioctx(current->mm, ioctx, NULL);
1362  		percpu_ref_put(&ioctx->users);
1363  	}
1364  
1365  out:
1366  	return ret;
1367  }
1368  
1369  #ifdef CONFIG_COMPAT
1370  COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1371  {
1372  	struct kioctx *ioctx = NULL;
1373  	unsigned long ctx;
1374  	long ret;
1375  
1376  	ret = get_user(ctx, ctx32p);
1377  	if (unlikely(ret))
1378  		goto out;
1379  
1380  	ret = -EINVAL;
1381  	if (unlikely(ctx || nr_events == 0)) {
1382  		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1383  		         ctx, nr_events);
1384  		goto out;
1385  	}
1386  
1387  	ioctx = ioctx_alloc(nr_events);
1388  	ret = PTR_ERR(ioctx);
1389  	if (!IS_ERR(ioctx)) {
1390  		/* truncating is ok because it's a user address */
1391  		ret = put_user((u32)ioctx->user_id, ctx32p);
1392  		if (ret)
1393  			kill_ioctx(current->mm, ioctx, NULL);
1394  		percpu_ref_put(&ioctx->users);
1395  	}
1396  
1397  out:
1398  	return ret;
1399  }
1400  #endif
1401  
1402  /* sys_io_destroy:
1403   *	Destroy the aio_context specified.  May cancel any outstanding
1404   *	AIOs and block on completion.  Will fail with -ENOSYS if not
1405   *	implemented.  May fail with -EINVAL if the context pointed to
1406   *	is invalid.
1407   */
1408  SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1409  {
1410  	struct kioctx *ioctx = lookup_ioctx(ctx);
1411  	if (likely(NULL != ioctx)) {
1412  		struct ctx_rq_wait wait;
1413  		int ret;
1414  
1415  		init_completion(&wait.comp);
1416  		atomic_set(&wait.count, 1);
1417  
1418  		/* Pass the wait structure to kill_ioctx() where it can be set
1419  		 * in a thread-safe way. If we try to set it here then we have
1420  		 * a race condition if two io_destroy() calls run simultaneously.
1421  		 */
1422  		ret = kill_ioctx(current->mm, ioctx, &wait);
1423  		percpu_ref_put(&ioctx->users);
1424  
1425  		/* Wait until all IO for the context is done. Otherwise the kernel
1426  		 * keeps using user-space buffers even if the user thinks the context
1427  		 * is destroyed.
1428  		 */
1429  		if (!ret)
1430  			wait_for_completion(&wait.comp);
1431  
1432  		return ret;
1433  	}
1434  	pr_debug("EINVAL: invalid context id\n");
1435  	return -EINVAL;
1436  }
1437  
1438  static void aio_remove_iocb(struct aio_kiocb *iocb)
1439  {
1440  	struct kioctx *ctx = iocb->ki_ctx;
1441  	unsigned long flags;
1442  
1443  	spin_lock_irqsave(&ctx->ctx_lock, flags);
1444  	list_del(&iocb->ki_list);
1445  	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1446  }
1447  
1448  static void aio_complete_rw(struct kiocb *kiocb, long res)
1449  {
1450  	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1451  
1452  	if (!list_empty_careful(&iocb->ki_list))
1453  		aio_remove_iocb(iocb);
1454  
1455  	if (kiocb->ki_flags & IOCB_WRITE) {
1456  		struct inode *inode = file_inode(kiocb->ki_filp);
1457  
1458  		/*
1459  		 * Tell lockdep we inherited freeze protection from submission
1460  		 * thread.
1461  		 */
1462  		if (S_ISREG(inode->i_mode))
1463  			__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1464  		file_end_write(kiocb->ki_filp);
1465  	}
1466  
1467  	iocb->ki_res.res = res;
1468  	iocb->ki_res.res2 = 0;
1469  	iocb_put(iocb);
1470  }
1471  
1472  static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1473  {
1474  	int ret;
1475  
1476  	req->ki_complete = aio_complete_rw;
1477  	req->private = NULL;
1478  	req->ki_pos = iocb->aio_offset;
1479  	req->ki_flags = req->ki_filp->f_iocb_flags;
1480  	if (iocb->aio_flags & IOCB_FLAG_RESFD)
1481  		req->ki_flags |= IOCB_EVENTFD;
1482  	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1483  		/*
1484  		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1485  		 * aio_reqprio is interpreted as an I/O scheduling
1486  		 * class and priority.
1487  		 */
1488  		ret = ioprio_check_cap(iocb->aio_reqprio);
1489  		if (ret) {
1490  			pr_debug("aio ioprio check cap error: %d\n", ret);
1491  			return ret;
1492  		}
1493  
1494  		req->ki_ioprio = iocb->aio_reqprio;
1495  	} else
1496  		req->ki_ioprio = get_current_ioprio();
1497  
1498  	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1499  	if (unlikely(ret))
1500  		return ret;
1501  
1502  	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1503  	return 0;
1504  }
1505  
1506  static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1507  		struct iovec **iovec, bool vectored, bool compat,
1508  		struct iov_iter *iter)
1509  {
1510  	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1511  	size_t len = iocb->aio_nbytes;
1512  
1513  	if (!vectored) {
1514  		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1515  		*iovec = NULL;
1516  		return ret;
1517  	}
1518  
1519  	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1520  }
1521  
1522  static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1523  {
1524  	switch (ret) {
1525  	case -EIOCBQUEUED:
1526  		break;
1527  	case -ERESTARTSYS:
1528  	case -ERESTARTNOINTR:
1529  	case -ERESTARTNOHAND:
1530  	case -ERESTART_RESTARTBLOCK:
1531  		/*
1532  		 * There's no easy way to restart the syscall since other AIOs
1533  		 * may already be running. Just fail this IO with EINTR.
1534  		 */
1535  		ret = -EINTR;
1536  		fallthrough;
1537  	default:
1538  		req->ki_complete(req, ret);
1539  	}
1540  }
1541  
1542  static int aio_read(struct kiocb *req, const struct iocb *iocb,
1543  			bool vectored, bool compat)
1544  {
1545  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1546  	struct iov_iter iter;
1547  	struct file *file;
1548  	int ret;
1549  
1550  	ret = aio_prep_rw(req, iocb);
1551  	if (ret)
1552  		return ret;
1553  	file = req->ki_filp;
1554  	if (unlikely(!(file->f_mode & FMODE_READ)))
1555  		return -EBADF;
1556  	if (unlikely(!file->f_op->read_iter))
1557  		return -EINVAL;
1558  
1559  	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
1560  	if (ret < 0)
1561  		return ret;
1562  	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1563  	if (!ret)
1564  		aio_rw_done(req, call_read_iter(file, req, &iter));
1565  	kfree(iovec);
1566  	return ret;
1567  }
1568  
1569  static int aio_write(struct kiocb *req, const struct iocb *iocb,
1570  			 bool vectored, bool compat)
1571  {
1572  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1573  	struct iov_iter iter;
1574  	struct file *file;
1575  	int ret;
1576  
1577  	ret = aio_prep_rw(req, iocb);
1578  	if (ret)
1579  		return ret;
1580  	file = req->ki_filp;
1581  
1582  	if (unlikely(!(file->f_mode & FMODE_WRITE)))
1583  		return -EBADF;
1584  	if (unlikely(!file->f_op->write_iter))
1585  		return -EINVAL;
1586  
1587  	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
1588  	if (ret < 0)
1589  		return ret;
1590  	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1591  	if (!ret) {
1592  		/*
1593  		 * Open-code file_start_write here to grab freeze protection,
1594  		 * which will be released by another thread in
1595  		 * aio_complete_rw().  Fool lockdep by telling it the lock got
1596  		 * released so that it doesn't complain about the held lock when
1597  		 * we return to userspace.
1598  		 */
1599  		if (S_ISREG(file_inode(file)->i_mode)) {
1600  			sb_start_write(file_inode(file)->i_sb);
1601  			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1602  		}
1603  		req->ki_flags |= IOCB_WRITE;
1604  		aio_rw_done(req, call_write_iter(file, req, &iter));
1605  	}
1606  	kfree(iovec);
1607  	return ret;
1608  }
1609  
1610  static void aio_fsync_work(struct work_struct *work)
1611  {
1612  	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1613  	const struct cred *old_cred = override_creds(iocb->fsync.creds);
1614  
1615  	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1616  	revert_creds(old_cred);
1617  	put_cred(iocb->fsync.creds);
1618  	iocb_put(iocb);
1619  }
1620  
1621  static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1622  		     bool datasync)
1623  {
1624  	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1625  			iocb->aio_rw_flags))
1626  		return -EINVAL;
1627  
1628  	if (unlikely(!req->file->f_op->fsync))
1629  		return -EINVAL;
1630  
1631  	req->creds = prepare_creds();
1632  	if (!req->creds)
1633  		return -ENOMEM;
1634  
1635  	req->datasync = datasync;
1636  	INIT_WORK(&req->work, aio_fsync_work);
1637  	schedule_work(&req->work);
1638  	return 0;
1639  }
1640  
1641  static void aio_poll_put_work(struct work_struct *work)
1642  {
1643  	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1644  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1645  
1646  	iocb_put(iocb);
1647  }
1648  
1649  /*
1650   * Safely lock the waitqueue which the request is on, synchronizing with the
1651   * case where the ->poll() provider decides to free its waitqueue early.
1652   *
1653   * Returns true on success, meaning that req->head->lock was locked, req->wait
1654   * is on req->head, and an RCU read lock was taken.  Returns false if the
1655   * request was already removed from its waitqueue (which might no longer exist).
1656   */
1657  static bool poll_iocb_lock_wq(struct poll_iocb *req)
1658  {
1659  	wait_queue_head_t *head;
1660  
1661  	/*
1662  	 * While we hold the waitqueue lock and the waitqueue is nonempty,
1663  	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
1664  	 * lock in the first place can race with the waitqueue being freed.
1665  	 *
1666  	 * We solve this as eventpoll does: by taking advantage of the fact that
1667  	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
1668  	 * we enter rcu_read_lock() and see that the pointer to the queue is
1669  	 * non-NULL, we can then lock it without the memory being freed out from
1670  	 * under us, then check whether the request is still on the queue.
1671  	 *
1672  	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1673  	 * case the caller deletes the entry from the queue, leaving it empty.
1674  	 * In that case, only RCU prevents the queue memory from being freed.
1675  	 */
1676  	rcu_read_lock();
1677  	head = smp_load_acquire(&req->head);
1678  	if (head) {
1679  		spin_lock(&head->lock);
1680  		if (!list_empty(&req->wait.entry))
1681  			return true;
1682  		spin_unlock(&head->lock);
1683  	}
1684  	rcu_read_unlock();
1685  	return false;
1686  }
1687  
1688  static void poll_iocb_unlock_wq(struct poll_iocb *req)
1689  {
1690  	spin_unlock(&req->head->lock);
1691  	rcu_read_unlock();
1692  }
1693  
1694  static void aio_poll_complete_work(struct work_struct *work)
1695  {
1696  	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1697  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1698  	struct poll_table_struct pt = { ._key = req->events };
1699  	struct kioctx *ctx = iocb->ki_ctx;
1700  	__poll_t mask = 0;
1701  
1702  	if (!READ_ONCE(req->cancelled))
1703  		mask = vfs_poll(req->file, &pt) & req->events;
1704  
1705  	/*
1706  	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1707  	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
1708  	 * synchronize with them.  In the cancellation case the list_del_init
1709  	 * itself is not actually needed, but harmless so we keep it in to
1710  	 * avoid further branches in the fast path.
1711  	 */
1712  	spin_lock_irq(&ctx->ctx_lock);
1713  	if (poll_iocb_lock_wq(req)) {
1714  		if (!mask && !READ_ONCE(req->cancelled)) {
1715  			/*
1716  			 * The request isn't actually ready to be completed yet.
1717  			 * Reschedule completion if another wakeup came in.
1718  			 */
1719  			if (req->work_need_resched) {
1720  				schedule_work(&req->work);
1721  				req->work_need_resched = false;
1722  			} else {
1723  				req->work_scheduled = false;
1724  			}
1725  			poll_iocb_unlock_wq(req);
1726  			spin_unlock_irq(&ctx->ctx_lock);
1727  			return;
1728  		}
1729  		list_del_init(&req->wait.entry);
1730  		poll_iocb_unlock_wq(req);
1731  	} /* else, POLLFREE has freed the waitqueue, so we must complete */
1732  	list_del_init(&iocb->ki_list);
1733  	iocb->ki_res.res = mangle_poll(mask);
1734  	spin_unlock_irq(&ctx->ctx_lock);
1735  
1736  	iocb_put(iocb);
1737  }
1738  
1739  /* assumes we are called with irqs disabled */
1740  static int aio_poll_cancel(struct kiocb *iocb)
1741  {
1742  	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1743  	struct poll_iocb *req = &aiocb->poll;
1744  
1745  	if (poll_iocb_lock_wq(req)) {
1746  		WRITE_ONCE(req->cancelled, true);
1747  		if (!req->work_scheduled) {
1748  			schedule_work(&aiocb->poll.work);
1749  			req->work_scheduled = true;
1750  		}
1751  		poll_iocb_unlock_wq(req);
1752  	} /* else, the request was force-cancelled by POLLFREE already */
1753  
1754  	return 0;
1755  }
1756  
1757  static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1758  		void *key)
1759  {
1760  	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1761  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1762  	__poll_t mask = key_to_poll(key);
1763  	unsigned long flags;
1764  
1765  	/* for instances that support it check for an event match first: */
1766  	if (mask && !(mask & req->events))
1767  		return 0;
1768  
1769  	/*
1770  	 * Complete the request inline if possible.  This requires that three
1771  	 * conditions be met:
1772  	 *   1. An event mask must have been passed.  If a plain wakeup was done
1773  	 *	instead, then mask == 0 and we have to call vfs_poll() to get
1774  	 *	the events, so inline completion isn't possible.
1775  	 *   2. The completion work must not have already been scheduled.
1776  	 *   3. ctx_lock must not be busy.  We have to use trylock because we
1777  	 *	already hold the waitqueue lock, so this inverts the normal
1778  	 *	locking order.  Use irqsave/irqrestore because not all
1779  	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
1780  	 *	yet IRQs have to be disabled before ctx_lock is obtained.
1781  	 */
1782  	if (mask && !req->work_scheduled &&
1783  	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1784  		struct kioctx *ctx = iocb->ki_ctx;
1785  
1786  		list_del_init(&req->wait.entry);
1787  		list_del(&iocb->ki_list);
1788  		iocb->ki_res.res = mangle_poll(mask);
1789  		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1790  			iocb = NULL;
1791  			INIT_WORK(&req->work, aio_poll_put_work);
1792  			schedule_work(&req->work);
1793  		}
1794  		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1795  		if (iocb)
1796  			iocb_put(iocb);
1797  	} else {
1798  		/*
1799  		 * Schedule the completion work if needed.  If it was already
1800  		 * scheduled, record that another wakeup came in.
1801  		 *
1802  		 * Don't remove the request from the waitqueue here, as it might
1803  		 * not actually be complete yet (we won't know until vfs_poll()
1804  		 * is called), and we must not miss any wakeups.  POLLFREE is an
1805  		 * exception to this; see below.
1806  		 */
1807  		if (req->work_scheduled) {
1808  			req->work_need_resched = true;
1809  		} else {
1810  			schedule_work(&req->work);
1811  			req->work_scheduled = true;
1812  		}
1813  
1814  		/*
1815  		 * If the waitqueue is being freed early but we can't complete
1816  		 * the request inline, we have to tear down the request as best
1817  		 * we can.  That means immediately removing the request from its
1818  		 * waitqueue and preventing all further accesses to the
1819  		 * waitqueue via the request.  We also need to schedule the
1820  		 * completion work (done above).  Also mark the request as
1821  		 * cancelled, to potentially skip an unneeded call to ->poll().
1822  		 */
1823  		if (mask & POLLFREE) {
1824  			WRITE_ONCE(req->cancelled, true);
1825  			list_del_init(&req->wait.entry);
1826  
1827  			/*
1828  			 * Careful: this *must* be the last step, since as soon
1829  			 * as req->head is NULL'ed out, the request can be
1830  			 * completed and freed, since aio_poll_complete_work()
1831  			 * will no longer need to take the waitqueue lock.
1832  			 */
1833  			smp_store_release(&req->head, NULL);
1834  		}
1835  	}
1836  	return 1;
1837  }
1838  
1839  struct aio_poll_table {
1840  	struct poll_table_struct	pt;
1841  	struct aio_kiocb		*iocb;
1842  	bool				queued;
1843  	int				error;
1844  };
1845  
1846  static void
1847  aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1848  		struct poll_table_struct *p)
1849  {
1850  	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1851  
1852  	/* multiple wait queues per file are not supported */
1853  	if (unlikely(pt->queued)) {
1854  		pt->error = -EINVAL;
1855  		return;
1856  	}
1857  
1858  	pt->queued = true;
1859  	pt->error = 0;
1860  	pt->iocb->poll.head = head;
1861  	add_wait_queue(head, &pt->iocb->poll.wait);
1862  }
1863  
1864  static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1865  {
1866  	struct kioctx *ctx = aiocb->ki_ctx;
1867  	struct poll_iocb *req = &aiocb->poll;
1868  	struct aio_poll_table apt;
1869  	bool cancel = false;
1870  	__poll_t mask;
1871  
1872  	/* reject any unknown events outside the normal event mask. */
1873  	if ((u16)iocb->aio_buf != iocb->aio_buf)
1874  		return -EINVAL;
1875  	/* reject fields that are not defined for poll */
1876  	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1877  		return -EINVAL;
1878  
1879  	INIT_WORK(&req->work, aio_poll_complete_work);
1880  	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1881  
1882  	req->head = NULL;
1883  	req->cancelled = false;
1884  	req->work_scheduled = false;
1885  	req->work_need_resched = false;
1886  
1887  	apt.pt._qproc = aio_poll_queue_proc;
1888  	apt.pt._key = req->events;
1889  	apt.iocb = aiocb;
1890  	apt.queued = false;
1891  	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1892  
1893  	/* initialize the list so that we can do list_empty checks */
1894  	INIT_LIST_HEAD(&req->wait.entry);
1895  	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1896  
1897  	mask = vfs_poll(req->file, &apt.pt) & req->events;
1898  	spin_lock_irq(&ctx->ctx_lock);
1899  	if (likely(apt.queued)) {
1900  		bool on_queue = poll_iocb_lock_wq(req);
1901  
1902  		if (!on_queue || req->work_scheduled) {
1903  			/*
1904  			 * aio_poll_wake() already either scheduled the async
1905  			 * completion work, or completed the request inline.
1906  			 */
1907  			if (apt.error) /* unsupported case: multiple queues */
1908  				cancel = true;
1909  			apt.error = 0;
1910  			mask = 0;
1911  		}
1912  		if (mask || apt.error) {
1913  			/* Steal to complete synchronously. */
1914  			list_del_init(&req->wait.entry);
1915  		} else if (cancel) {
1916  			/* Cancel if possible (may be too late though). */
1917  			WRITE_ONCE(req->cancelled, true);
1918  		} else if (on_queue) {
1919  			/*
1920  			 * Actually waiting for an event, so add the request to
1921  			 * active_reqs so that it can be cancelled if needed.
1922  			 */
1923  			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1924  			aiocb->ki_cancel = aio_poll_cancel;
1925  		}
1926  		if (on_queue)
1927  			poll_iocb_unlock_wq(req);
1928  	}
1929  	if (mask) { /* no async, we'd stolen it */
1930  		aiocb->ki_res.res = mangle_poll(mask);
1931  		apt.error = 0;
1932  	}
1933  	spin_unlock_irq(&ctx->ctx_lock);
1934  	if (mask)
1935  		iocb_put(aiocb);
1936  	return apt.error;
1937  }
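
/*
 * Illustrative userspace sketch (not part of this file): submitting an
 * IOCB_CMD_POLL request against the checks in aio_poll() above.  The
 * requested events travel in aio_buf (low 16 bits only), and aio_offset,
 * aio_nbytes and aio_rw_flags must be zero.  Raw syscall numbers are used
 * to stay libc-agnostic; the function name is arbitrary and error handling
 * is minimal.  A hedged example, not kernel code.
 */
#if 0	/* example only, never compiled here */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

static int aio_poll_once(aio_context_t ctx, int fd)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_fildes = fd;
	cb.aio_buf = POLLIN;			/* events, as for poll(2) */

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		return -1;
	/* NULL timeout: block until the poll event fires. */
	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
		return -1;
	return (int)ev.res;			/* returned poll mask */
}
#endif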
1938  
1939  static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1940  			   struct iocb __user *user_iocb, struct aio_kiocb *req,
1941  			   bool compat)
1942  {
1943  	req->ki_filp = fget(iocb->aio_fildes);
1944  	if (unlikely(!req->ki_filp))
1945  		return -EBADF;
1946  
1947  	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1948  		struct eventfd_ctx *eventfd;
1949  		/*
1950  		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1951  		 * instance of the file* now. The file descriptor must be
1952  		 * an eventfd() fd, and will be signaled for each completed
1953  		 * event using the eventfd_signal() function.
1954  		 */
1955  		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1956  		if (IS_ERR(eventfd))
1957  			return PTR_ERR(eventfd);
1958  
1959  		req->ki_eventfd = eventfd;
1960  	}
1961  
1962  	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1963  		pr_debug("EFAULT: aio_key\n");
1964  		return -EFAULT;
1965  	}
1966  
1967  	req->ki_res.obj = (u64)(unsigned long)user_iocb;
1968  	req->ki_res.data = iocb->aio_data;
1969  	req->ki_res.res = 0;
1970  	req->ki_res.res2 = 0;
1971  
1972  	switch (iocb->aio_lio_opcode) {
1973  	case IOCB_CMD_PREAD:
1974  		return aio_read(&req->rw, iocb, false, compat);
1975  	case IOCB_CMD_PWRITE:
1976  		return aio_write(&req->rw, iocb, false, compat);
1977  	case IOCB_CMD_PREADV:
1978  		return aio_read(&req->rw, iocb, true, compat);
1979  	case IOCB_CMD_PWRITEV:
1980  		return aio_write(&req->rw, iocb, true, compat);
1981  	case IOCB_CMD_FSYNC:
1982  		return aio_fsync(&req->fsync, iocb, false);
1983  	case IOCB_CMD_FDSYNC:
1984  		return aio_fsync(&req->fsync, iocb, true);
1985  	case IOCB_CMD_POLL:
1986  		return aio_poll(req, iocb);
1987  	default:
1988  		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1989  		return -EINVAL;
1990  	}
1991  }
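
/*
 * Illustrative userspace sketch: requesting eventfd completion notification,
 * as consumed by the IOCB_FLAG_RESFD branch of __io_submit_one() above.
 * The eventfd is signalled once per completed event.  The helper name is
 * arbitrary; a hedged example, not kernel code, with error handling omitted.
 */
#if 0	/* example only, never compiled here */
#include <linux/aio_abi.h>
#include <string.h>

static void prep_pread_with_resfd(struct iocb *cb, int fd, void *buf,
				  unsigned long len, int efd)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_lio_opcode = IOCB_CMD_PREAD;
	cb->aio_fildes = fd;
	cb->aio_buf = (unsigned long)buf;
	cb->aio_nbytes = len;
	cb->aio_flags = IOCB_FLAG_RESFD;	/* signal efd on completion */
	cb->aio_resfd = efd;			/* must be an eventfd(2) fd */
}
#endif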
1992  
1993  static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1994  			 bool compat)
1995  {
1996  	struct aio_kiocb *req;
1997  	struct iocb iocb;
1998  	int err;
1999  
2000  	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2001  		return -EFAULT;
2002  
2003  	/* enforce forwards compatibility on users */
2004  	if (unlikely(iocb.aio_reserved2)) {
2005  		pr_debug("EINVAL: reserved field set\n");
2006  		return -EINVAL;
2007  	}
2008  
2009  	/* prevent overflows */
2010  	if (unlikely(
2011  	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2012  	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2013  	    ((ssize_t)iocb.aio_nbytes < 0)
2014  	   )) {
2015  		pr_debug("EINVAL: overflow check\n");
2016  		return -EINVAL;
2017  	}
2018  
2019  	req = aio_get_req(ctx);
2020  	if (unlikely(!req))
2021  		return -EAGAIN;
2022  
2023  	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2024  
2025  	/* Done with the synchronous reference */
2026  	iocb_put(req);
2027  
2028  	/*
2029  	 * If err is 0, we've either done aio_complete() ourselves or have
2030  	 * arranged for that to be done asynchronously.  Anything non-zero
2031  	 * means that we need to destroy req ourselves.
2032  	 */
2033  	if (unlikely(err)) {
2034  		iocb_destroy(req);
2035  		put_reqs_available(ctx, 1);
2036  	}
2037  	return err;
2038  }
2039  
2040  /* sys_io_submit:
2041   *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
2042   *	the number of iocbs queued.  May return -EINVAL if the aio_context
2043   *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
2044   *	*iocbpp[0] is not properly initialized, or if the operation specified
2045   *	is invalid for the file descriptor in the iocb.  May fail with
2046   *	-EFAULT if any of the data structures point to invalid data.  May
2047   *	fail with -EBADF if the file descriptor specified in the first
2048   *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
2049   *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
2050   *	fail with -ENOSYS if not implemented.
2051   */
2052  SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2053  		struct iocb __user * __user *, iocbpp)
2054  {
2055  	struct kioctx *ctx;
2056  	long ret = 0;
2057  	int i = 0;
2058  	struct blk_plug plug;
2059  
2060  	if (unlikely(nr < 0))
2061  		return -EINVAL;
2062  
2063  	ctx = lookup_ioctx(ctx_id);
2064  	if (unlikely(!ctx)) {
2065  		pr_debug("EINVAL: invalid context id\n");
2066  		return -EINVAL;
2067  	}
2068  
2069  	if (nr > ctx->nr_events)
2070  		nr = ctx->nr_events;
2071  
2072  	if (nr > AIO_PLUG_THRESHOLD)
2073  		blk_start_plug(&plug);
2074  	for (i = 0; i < nr; i++) {
2075  		struct iocb __user *user_iocb;
2076  
2077  		if (unlikely(get_user(user_iocb, iocbpp + i))) {
2078  			ret = -EFAULT;
2079  			break;
2080  		}
2081  
2082  		ret = io_submit_one(ctx, user_iocb, false);
2083  		if (ret)
2084  			break;
2085  	}
2086  	if (nr > AIO_PLUG_THRESHOLD)
2087  		blk_finish_plug(&plug);
2088  
2089  	percpu_ref_put(&ctx->users);
2090  	return i ? i : ret;
2091  }
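
/*
 * Illustrative userspace sketch of the io_setup()/io_submit() calling
 * convention implemented above: io_submit() returns the number of iocbs
 * queued, or the error of the first iocb if none could be queued.  Raw
 * syscalls are used; the names and sizes below are arbitrary.  A hedged
 * example, not kernel code.
 */
#if 0	/* example only, never compiled here */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static long submit_one_pread(int fd, void *buf, unsigned long len)
{
	aio_context_t ctx = 0;		/* must be zeroed before io_setup() */
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	if (syscall(__NR_io_setup, 32, &ctx) < 0)
		return -1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = len;
	cb.aio_offset = 0;

	/* In real code, reap with io_getevents() and tear down with io_destroy(). */
	return syscall(__NR_io_submit, ctx, 1, cbs);	/* 1 on success */
}
#endif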
2092  
2093  #ifdef CONFIG_COMPAT
2094  COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2095  		       int, nr, compat_uptr_t __user *, iocbpp)
2096  {
2097  	struct kioctx *ctx;
2098  	long ret = 0;
2099  	int i = 0;
2100  	struct blk_plug plug;
2101  
2102  	if (unlikely(nr < 0))
2103  		return -EINVAL;
2104  
2105  	ctx = lookup_ioctx(ctx_id);
2106  	if (unlikely(!ctx)) {
2107  		pr_debug("EINVAL: invalid context id\n");
2108  		return -EINVAL;
2109  	}
2110  
2111  	if (nr > ctx->nr_events)
2112  		nr = ctx->nr_events;
2113  
2114  	if (nr > AIO_PLUG_THRESHOLD)
2115  		blk_start_plug(&plug);
2116  	for (i = 0; i < nr; i++) {
2117  		compat_uptr_t user_iocb;
2118  
2119  		if (unlikely(get_user(user_iocb, iocbpp + i))) {
2120  			ret = -EFAULT;
2121  			break;
2122  		}
2123  
2124  		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2125  		if (ret)
2126  			break;
2127  	}
2128  	if (nr > AIO_PLUG_THRESHOLD)
2129  		blk_finish_plug(&plug);
2130  
2131  	percpu_ref_put(&ctx->users);
2132  	return i ? i : ret;
2133  }
2134  #endif
2135  
2136  /* sys_io_cancel:
2137   *	Attempts to cancel an iocb previously passed to io_submit.  If
2138   *	the request is found, cancellation is started and -EINPROGRESS is
2139   *	returned; the completion event is still delivered through the
2140   *	completion queue, and the result argument is no longer used.
2141   *	May fail with -EFAULT if any of the data structures pointed to
2142   *	are invalid.  May fail with -EINVAL if the aio_context specified
2143   *	by ctx_id is invalid, or if the iocb could not be found.  Will
2144   *	fail with -ENOSYS if not implemented.
2145   */
2146  SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2147  		struct io_event __user *, result)
2148  {
2149  	struct kioctx *ctx;
2150  	struct aio_kiocb *kiocb;
2151  	int ret = -EINVAL;
2152  	u32 key;
2153  	u64 obj = (u64)(unsigned long)iocb;
2154  
2155  	if (unlikely(get_user(key, &iocb->aio_key)))
2156  		return -EFAULT;
2157  	if (unlikely(key != KIOCB_KEY))
2158  		return -EINVAL;
2159  
2160  	ctx = lookup_ioctx(ctx_id);
2161  	if (unlikely(!ctx))
2162  		return -EINVAL;
2163  
2164  	spin_lock_irq(&ctx->ctx_lock);
2165  	/* TODO: use a hash or array, this sucks. */
2166  	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2167  		if (kiocb->ki_res.obj == obj) {
2168  			ret = kiocb->ki_cancel(&kiocb->rw);
2169  			list_del_init(&kiocb->ki_list);
2170  			break;
2171  		}
2172  	}
2173  	spin_unlock_irq(&ctx->ctx_lock);
2174  
2175  	if (!ret) {
2176  		/*
2177  		 * The result argument is no longer used - the io_event is
2178  		 * always delivered via the ring buffer. -EINPROGRESS indicates
2179  		 * cancellation is in progress:
2180  		 */
2181  		ret = -EINPROGRESS;
2182  	}
2183  
2184  	percpu_ref_put(&ctx->users);
2185  
2186  	return ret;
2187  }
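
/*
 * Illustrative userspace sketch of the io_cancel() return convention
 * implemented above: a found request reports -EINPROGRESS and its
 * completion event still arrives via io_getevents(); the result argument
 * is ignored.  The helper name is arbitrary.  A hedged example, not
 * kernel code.
 */
#if 0	/* example only, never compiled here */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <errno.h>
#include <unistd.h>

static int try_cancel(aio_context_t ctx, struct iocb *cb)
{
	struct io_event ev;	/* required by the ABI, but no longer filled in */
	long ret = syscall(__NR_io_cancel, ctx, cb, &ev);

	if (ret == 0 || errno == EINPROGRESS)
		return 0;	/* cancellation started; reap via io_getevents() */
	return -errno;		/* e.g. -EINVAL: request not found */
}
#endif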
2188  
2189  static long do_io_getevents(aio_context_t ctx_id,
2190  		long min_nr,
2191  		long nr,
2192  		struct io_event __user *events,
2193  		struct timespec64 *ts)
2194  {
2195  	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2196  	struct kioctx *ioctx = lookup_ioctx(ctx_id);
2197  	long ret = -EINVAL;
2198  
2199  	if (likely(ioctx)) {
2200  		if (likely(min_nr <= nr && min_nr >= 0))
2201  			ret = read_events(ioctx, min_nr, nr, events, until);
2202  		percpu_ref_put(&ioctx->users);
2203  	}
2204  
2205  	return ret;
2206  }
2207  
2208  /* io_getevents:
2209   *	Attempts to read at least min_nr events and up to nr events from
2210   *	the completion queue for the aio_context specified by ctx_id. If
2211   *	it succeeds, the number of read events is returned. May fail with
2212   *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2213   *	out of range, or if timeout is out of range.  May fail with -EFAULT
2214   *	if any of the memory specified is invalid.  May return 0 or
2215   *	< min_nr if the timeout specified by timeout has elapsed
2216   *	before sufficient events are available, where timeout == NULL
2217   *	specifies an infinite timeout. Note that the timeout pointed to by
2218   *	timeout is relative.  Will fail with -ENOSYS if not implemented.
2219   */
2220  #ifdef CONFIG_64BIT
2221  
2222  SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2223  		long, min_nr,
2224  		long, nr,
2225  		struct io_event __user *, events,
2226  		struct __kernel_timespec __user *, timeout)
2227  {
2228  	struct timespec64	ts;
2229  	int			ret;
2230  
2231  	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2232  		return -EFAULT;
2233  
2234  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2235  	if (!ret && signal_pending(current))
2236  		ret = -EINTR;
2237  	return ret;
2238  }
2239  
2240  #endif
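
/*
 * Illustrative userspace sketch of io_getevents() with a relative timeout,
 * matching the semantics documented above: wait for at least min_nr events
 * or until the timeout elapses, whichever comes first.  The helper name and
 * the one-second timeout are arbitrary.  A hedged example, not kernel code.
 */
#if 0	/* example only, never compiled here */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long reap_some(aio_context_t ctx, struct io_event *evs, long nr)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };	/* relative */

	/* Block for up to one second waiting for at least one completion. */
	return syscall(__NR_io_getevents, ctx, 1, nr, evs, &ts);
}
#endif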
2241  
2242  struct __aio_sigset {
2243  	const sigset_t __user	*sigmask;
2244  	size_t		sigsetsize;
2245  };
2246  
2247  SYSCALL_DEFINE6(io_pgetevents,
2248  		aio_context_t, ctx_id,
2249  		long, min_nr,
2250  		long, nr,
2251  		struct io_event __user *, events,
2252  		struct __kernel_timespec __user *, timeout,
2253  		const struct __aio_sigset __user *, usig)
2254  {
2255  	struct __aio_sigset	ksig = { NULL, };
2256  	struct timespec64	ts;
2257  	bool interrupted;
2258  	int ret;
2259  
2260  	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2261  		return -EFAULT;
2262  
2263  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2264  		return -EFAULT;
2265  
2266  	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2267  	if (ret)
2268  		return ret;
2269  
2270  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2271  
2272  	interrupted = signal_pending(current);
2273  	restore_saved_sigmask_unless(interrupted);
2274  	if (interrupted && !ret)
2275  		ret = -ERESTARTNOHAND;
2276  
2277  	return ret;
2278  }
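
/*
 * Illustrative userspace sketch of io_pgetevents() with a temporary signal
 * mask, mirroring the pointer-plus-sigsetsize layout of struct __aio_sigset
 * above.  The local struct definition and the sigsetsize of 8 (the kernel
 * sigset_t size on x86-64) are assumptions made for the example, since a
 * libc wrapper may not expose them.  A hedged example, not kernel code.
 */
#if 0	/* example only, never compiled here */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>

struct example_aio_sigset {		/* assumed layout, see above */
	const sigset_t *sigmask;
	size_t sigsetsize;
};

static long reap_with_sigmask(aio_context_t ctx, struct io_event *evs, long nr)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct example_aio_sigset sig;
	sigset_t mask;

	/* Block everything except SIGUSR1 while waiting, like ppoll(2). */
	sigfillset(&mask);
	sigdelset(&mask, SIGUSR1);
	sig.sigmask = &mask;
	sig.sigsetsize = 8;		/* assumed kernel sigset_t size */

	return syscall(__NR_io_pgetevents, ctx, 1, nr, evs, &ts, &sig);
}
#endif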
2279  
2280  #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2281  
2282  SYSCALL_DEFINE6(io_pgetevents_time32,
2283  		aio_context_t, ctx_id,
2284  		long, min_nr,
2285  		long, nr,
2286  		struct io_event __user *, events,
2287  		struct old_timespec32 __user *, timeout,
2288  		const struct __aio_sigset __user *, usig)
2289  {
2290  	struct __aio_sigset	ksig = { NULL, };
2291  	struct timespec64	ts;
2292  	bool interrupted;
2293  	int ret;
2294  
2295  	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2296  		return -EFAULT;
2297  
2298  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2299  		return -EFAULT;
2300  
2302  	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2303  	if (ret)
2304  		return ret;
2305  
2306  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2307  
2308  	interrupted = signal_pending(current);
2309  	restore_saved_sigmask_unless(interrupted);
2310  	if (interrupted && !ret)
2311  		ret = -ERESTARTNOHAND;
2312  
2313  	return ret;
2314  }
2315  
2316  #endif
2317  
2318  #if defined(CONFIG_COMPAT_32BIT_TIME)
2319  
2320  SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2321  		__s32, min_nr,
2322  		__s32, nr,
2323  		struct io_event __user *, events,
2324  		struct old_timespec32 __user *, timeout)
2325  {
2326  	struct timespec64 t;
2327  	int ret;
2328  
2329  	if (timeout && get_old_timespec32(&t, timeout))
2330  		return -EFAULT;
2331  
2332  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2333  	if (!ret && signal_pending(current))
2334  		ret = -EINTR;
2335  	return ret;
2336  }
2337  
2338  #endif
2339  
2340  #ifdef CONFIG_COMPAT
2341  
2342  struct __compat_aio_sigset {
2343  	compat_uptr_t		sigmask;
2344  	compat_size_t		sigsetsize;
2345  };
2346  
2347  #if defined(CONFIG_COMPAT_32BIT_TIME)
2348  
2349  COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2350  		compat_aio_context_t, ctx_id,
2351  		compat_long_t, min_nr,
2352  		compat_long_t, nr,
2353  		struct io_event __user *, events,
2354  		struct old_timespec32 __user *, timeout,
2355  		const struct __compat_aio_sigset __user *, usig)
2356  {
2357  	struct __compat_aio_sigset ksig = { 0, };
2358  	struct timespec64 t;
2359  	bool interrupted;
2360  	int ret;
2361  
2362  	if (timeout && get_old_timespec32(&t, timeout))
2363  		return -EFAULT;
2364  
2365  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2366  		return -EFAULT;
2367  
2368  	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2369  	if (ret)
2370  		return ret;
2371  
2372  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2373  
2374  	interrupted = signal_pending(current);
2375  	restore_saved_sigmask_unless(interrupted);
2376  	if (interrupted && !ret)
2377  		ret = -ERESTARTNOHAND;
2378  
2379  	return ret;
2380  }
2381  
2382  #endif
2383  
2384  COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2385  		compat_aio_context_t, ctx_id,
2386  		compat_long_t, min_nr,
2387  		compat_long_t, nr,
2388  		struct io_event __user *, events,
2389  		struct __kernel_timespec __user *, timeout,
2390  		const struct __compat_aio_sigset __user *, usig)
2391  {
2392  	struct __compat_aio_sigset ksig = { 0, };
2393  	struct timespec64 t;
2394  	bool interrupted;
2395  	int ret;
2396  
2397  	if (timeout && get_timespec64(&t, timeout))
2398  		return -EFAULT;
2399  
2400  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2401  		return -EFAULT;
2402  
2403  	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2404  	if (ret)
2405  		return ret;
2406  
2407  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2408  
2409  	interrupted = signal_pending(current);
2410  	restore_saved_sigmask_unless(interrupted);
2411  	if (interrupted && !ret)
2412  		ret = -ERESTARTNOHAND;
2413  
2414  	return ret;
2415  }
2416  #endif
2417