xref: /openbmc/linux/fs/aio.c (revision 3c81195a04e13833196462ab398d8bcf282701f7)
1  /*
2   *	An async IO implementation for Linux
3   *	Written by Benjamin LaHaise <bcrl@kvack.org>
4   *
5   *	Implements an efficient asynchronous io interface.
6   *
7   *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
8   *	Copyright 2018 Christoph Hellwig.
9   *
10   *	See ../COPYING for licensing terms.
11   */
12  #define pr_fmt(fmt) "%s: " fmt, __func__
13  
14  #include <linux/kernel.h>
15  #include <linux/init.h>
16  #include <linux/errno.h>
17  #include <linux/time.h>
18  #include <linux/aio_abi.h>
19  #include <linux/export.h>
20  #include <linux/syscalls.h>
21  #include <linux/backing-dev.h>
22  #include <linux/refcount.h>
23  #include <linux/uio.h>
24  
25  #include <linux/sched/signal.h>
26  #include <linux/fs.h>
27  #include <linux/file.h>
28  #include <linux/mm.h>
29  #include <linux/mman.h>
30  #include <linux/percpu.h>
31  #include <linux/slab.h>
32  #include <linux/timer.h>
33  #include <linux/aio.h>
34  #include <linux/highmem.h>
35  #include <linux/workqueue.h>
36  #include <linux/security.h>
37  #include <linux/eventfd.h>
38  #include <linux/blkdev.h>
39  #include <linux/compat.h>
40  #include <linux/migrate.h>
41  #include <linux/ramfs.h>
42  #include <linux/percpu-refcount.h>
43  #include <linux/mount.h>
44  #include <linux/pseudo_fs.h>
45  
46  #include <linux/uaccess.h>
47  #include <linux/nospec.h>
48  
49  #include "internal.h"
50  
51  #define KIOCB_KEY		0
52  
53  #define AIO_RING_MAGIC			0xa10a10a1
54  #define AIO_RING_COMPAT_FEATURES	1
55  #define AIO_RING_INCOMPAT_FEATURES	0
56  struct aio_ring {
57  	unsigned	id;	/* kernel internal index number */
58  	unsigned	nr;	/* number of io_events */
59  	unsigned	head;	/* Written to by userland or under ring_lock
60  				 * mutex by aio_read_events_ring(). */
61  	unsigned	tail;
62  
63  	unsigned	magic;
64  	unsigned	compat_features;
65  	unsigned	incompat_features;
66  	unsigned	header_length;	/* size of aio_ring */
67  
68  
69  	struct io_event		io_events[];
70  }; /* sizeof(struct aio_ring) header bytes + ring size */
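
/*
 * Illustration, not kernel code: this header is mapped into userspace and
 * the aio_context_t returned by io_setup() is its user address (see
 * ctx->user_id below).  A hedged sketch of a userspace reaper that consumes
 * completions in place, assuming incompat_features == 0 and a hypothetical
 * consume_event() helper (a real implementation also needs a read barrier
 * between loading tail and reading the events):
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx;
 *	unsigned head = ring->head;
 *
 *	while (head != ring->tail) {
 *		consume_event(&ring->io_events[head]);
 *		if (++head >= ring->nr)
 *			head = 0;
 *	}
 *	__atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
 */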
71  
72  /*
73   * Plugging is meant to work with larger batches of IOs. If we don't
74   * have more than the below, then don't bother setting up a plug.
75   */
76  #define AIO_PLUG_THRESHOLD	2
77  
78  #define AIO_RING_PAGES	8
79  
80  struct kioctx_table {
81  	struct rcu_head		rcu;
82  	unsigned		nr;
83  	struct kioctx __rcu	*table[];
84  };
85  
86  struct kioctx_cpu {
87  	unsigned		reqs_available;
88  };
89  
90  struct ctx_rq_wait {
91  	struct completion comp;
92  	atomic_t count;
93  };
94  
95  struct kioctx {
96  	struct percpu_ref	users;
97  	atomic_t		dead;
98  
99  	struct percpu_ref	reqs;
100  
101  	unsigned long		user_id;
102  
103  	struct __percpu kioctx_cpu *cpu;
104  
105  	/*
106  	 * For percpu reqs_available, number of slots we move to/from global
107  	 * counter at a time:
108  	 */
109  	unsigned		req_batch;
110  	/*
111  	 * This is what userspace passed to io_setup(); it's not used for
112  	 * anything but counting against the global max_reqs quota.
113  	 *
114  	 * The real limit is nr_events - 1, which will be larger (see
115  	 * aio_setup_ring())
116  	 */
117  	unsigned		max_reqs;
118  
119  	/* Size of ringbuffer, in units of struct io_event */
120  	unsigned		nr_events;
121  
122  	unsigned long		mmap_base;
123  	unsigned long		mmap_size;
124  
125  	struct page		**ring_pages;
126  	long			nr_pages;
127  
128  	struct rcu_work		free_rwork;	/* see free_ioctx() */
129  
130  	/*
131  	 * signals when all in-flight requests are done
132  	 */
133  	struct ctx_rq_wait	*rq_wait;
134  
135  	struct {
136  		/*
137  		 * This counts the number of available slots in the ringbuffer,
138  		 * so we avoid overflowing it: it's decremented (if positive)
139  		 * when allocating a kiocb and incremented when the resulting
140  		 * io_event is pulled off the ringbuffer.
141  		 *
142  		 * We batch accesses to it with a percpu version.
143  		 */
144  		atomic_t	reqs_available;
145  	} ____cacheline_aligned_in_smp;
146  
147  	struct {
148  		spinlock_t	ctx_lock;
149  		struct list_head active_reqs;	/* used for cancellation */
150  	} ____cacheline_aligned_in_smp;
151  
152  	struct {
153  		struct mutex	ring_lock;
154  		wait_queue_head_t wait;
155  	} ____cacheline_aligned_in_smp;
156  
157  	struct {
158  		unsigned	tail;
159  		unsigned	completed_events;
160  		spinlock_t	completion_lock;
161  	} ____cacheline_aligned_in_smp;
162  
163  	struct page		*internal_pages[AIO_RING_PAGES];
164  	struct file		*aio_ring_file;
165  
166  	unsigned		id;
167  };
168  
169  /*
170   * First field must be the file pointer in all the
171   * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172   */
173  struct fsync_iocb {
174  	struct file		*file;
175  	struct work_struct	work;
176  	bool			datasync;
177  	struct cred		*creds;
178  };
179  
180  struct poll_iocb {
181  	struct file		*file;
182  	struct wait_queue_head	*head;
183  	__poll_t		events;
184  	bool			cancelled;
185  	bool			work_scheduled;
186  	bool			work_need_resched;
187  	struct wait_queue_entry	wait;
188  	struct work_struct	work;
189  };
190  
191  /*
192   * NOTE! Each of the iocb union members has the file pointer
193   * as the first entry in their struct definition. So you can
194   * access the file pointer through any of the sub-structs,
195   * or directly as just 'ki_filp' in this struct.
196   */
197  struct aio_kiocb {
198  	union {
199  		struct file		*ki_filp;
200  		struct kiocb		rw;
201  		struct fsync_iocb	fsync;
202  		struct poll_iocb	poll;
203  	};
204  
205  	struct kioctx		*ki_ctx;
206  	kiocb_cancel_fn		*ki_cancel;
207  
208  	struct io_event		ki_res;
209  
210  	struct list_head	ki_list;	/* the aio core uses this
211  						 * for cancellation */
212  	refcount_t		ki_refcnt;
213  
214  	/*
215  	 * If the aio_resfd field of the userspace iocb is not zero,
216  	 * this is the underlying eventfd context to deliver events to.
217  	 */
218  	struct eventfd_ctx	*ki_eventfd;
219  };
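
/*
 * For illustration, a hedged userspace sketch of how ki_eventfd gets wired
 * up: submit an iocb with IOCB_FLAG_RESFD and an eventfd in aio_resfd
 * (symbols from <linux/aio_abi.h> and <sys/eventfd.h>; error handling
 * omitted):
 *
 *	int efd = eventfd(0, 0);
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_PREAD,
 *		.aio_fildes	= fd,
 *		.aio_buf	= (__u64)(uintptr_t)buf,
 *		.aio_nbytes	= len,
 *		.aio_flags	= IOCB_FLAG_RESFD,
 *		.aio_resfd	= efd,
 *	};
 *
 * After io_submit(), each completion also signals efd (see aio_complete()
 * below), so the eventfd can be poll()ed instead of blocking in
 * io_getevents().
 */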
220  
221  /*------ sysctl variables----*/
222  static DEFINE_SPINLOCK(aio_nr_lock);
223  static unsigned long aio_nr;		/* current system wide number of aio requests */
224  static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
225  /*----end sysctl variables---*/
226  #ifdef CONFIG_SYSCTL
227  static struct ctl_table aio_sysctls[] = {
228  	{
229  		.procname	= "aio-nr",
230  		.data		= &aio_nr,
231  		.maxlen		= sizeof(aio_nr),
232  		.mode		= 0444,
233  		.proc_handler	= proc_doulongvec_minmax,
234  	},
235  	{
236  		.procname	= "aio-max-nr",
237  		.data		= &aio_max_nr,
238  		.maxlen		= sizeof(aio_max_nr),
239  		.mode		= 0644,
240  		.proc_handler	= proc_doulongvec_minmax,
241  	},
242  	{}
243  };
244  
245  static void __init aio_sysctl_init(void)
246  {
247  	register_sysctl_init("fs", aio_sysctls);
248  }
249  #else
250  #define aio_sysctl_init() do { } while (0)
251  #endif
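
/*
 * Both knobs are exposed under /proc/sys/fs/.  A hedged userspace sketch
 * of checking how close the system is to the ceiling (plain stdio):
 *
 *	unsigned long nr = 0, max = 0;
 *	FILE *f = fopen("/proc/sys/fs/aio-nr", "r");
 *	FILE *g = fopen("/proc/sys/fs/aio-max-nr", "r");
 *
 *	if (f && g && fscanf(f, "%lu", &nr) == 1 && fscanf(g, "%lu", &max) == 1)
 *		printf("%lu of %lu aio requests in use\n", nr, max);
 */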
252  
253  static struct kmem_cache	*kiocb_cachep;
254  static struct kmem_cache	*kioctx_cachep;
255  
256  static struct vfsmount *aio_mnt;
257  
258  static const struct file_operations aio_ring_fops;
259  static const struct address_space_operations aio_ctx_aops;
260  
261  static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
262  {
263  	struct file *file;
264  	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
265  	if (IS_ERR(inode))
266  		return ERR_CAST(inode);
267  
268  	inode->i_mapping->a_ops = &aio_ctx_aops;
269  	inode->i_mapping->private_data = ctx;
270  	inode->i_size = PAGE_SIZE * nr_pages;
271  
272  	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
273  				O_RDWR, &aio_ring_fops);
274  	if (IS_ERR(file))
275  		iput(inode);
276  	return file;
277  }
278  
279  static int aio_init_fs_context(struct fs_context *fc)
280  {
281  	if (!init_pseudo(fc, AIO_RING_MAGIC))
282  		return -ENOMEM;
283  	fc->s_iflags |= SB_I_NOEXEC;
284  	return 0;
285  }
286  
287  /* aio_setup
288   *	Creates the slab caches used by the aio routines; panics on
289   *	failure, as this is done early during the boot sequence.
290   */
291  static int __init aio_setup(void)
292  {
293  	static struct file_system_type aio_fs = {
294  		.name		= "aio",
295  		.init_fs_context = aio_init_fs_context,
296  		.kill_sb	= kill_anon_super,
297  	};
298  	aio_mnt = kern_mount(&aio_fs);
299  	if (IS_ERR(aio_mnt))
300  		panic("Failed to create aio fs mount.");
301  
302  	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
303  	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
304  	aio_sysctl_init();
305  	return 0;
306  }
307  __initcall(aio_setup);
308  
309  static void put_aio_ring_file(struct kioctx *ctx)
310  {
311  	struct file *aio_ring_file = ctx->aio_ring_file;
312  	struct address_space *i_mapping;
313  
314  	if (aio_ring_file) {
315  		truncate_setsize(file_inode(aio_ring_file), 0);
316  
317  		/* Prevent further access to the kioctx from migratepages */
318  		i_mapping = aio_ring_file->f_mapping;
319  		spin_lock(&i_mapping->private_lock);
320  		i_mapping->private_data = NULL;
321  		ctx->aio_ring_file = NULL;
322  		spin_unlock(&i_mapping->private_lock);
323  
324  		fput(aio_ring_file);
325  	}
326  }
327  
328  static void aio_free_ring(struct kioctx *ctx)
329  {
330  	int i;
331  
332  	/* Disconnect the kioctx from the ring file.  This prevents future
333  	 * accesses to the kioctx from page migration.
334  	 */
335  	put_aio_ring_file(ctx);
336  
337  	for (i = 0; i < ctx->nr_pages; i++) {
338  		struct page *page;
339  		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
340  				page_count(ctx->ring_pages[i]));
341  		page = ctx->ring_pages[i];
342  		if (!page)
343  			continue;
344  		ctx->ring_pages[i] = NULL;
345  		put_page(page);
346  	}
347  
348  	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
349  		kfree(ctx->ring_pages);
350  		ctx->ring_pages = NULL;
351  	}
352  }
353  
354  static int aio_ring_mremap(struct vm_area_struct *vma)
355  {
356  	struct file *file = vma->vm_file;
357  	struct mm_struct *mm = vma->vm_mm;
358  	struct kioctx_table *table;
359  	int i, res = -EINVAL;
360  
361  	spin_lock(&mm->ioctx_lock);
362  	rcu_read_lock();
363  	table = rcu_dereference(mm->ioctx_table);
364  	if (!table)
365  		goto out_unlock;
366  
367  	for (i = 0; i < table->nr; i++) {
368  		struct kioctx *ctx;
369  
370  		ctx = rcu_dereference(table->table[i]);
371  		if (ctx && ctx->aio_ring_file == file) {
372  			if (!atomic_read(&ctx->dead)) {
373  				ctx->user_id = ctx->mmap_base = vma->vm_start;
374  				res = 0;
375  			}
376  			break;
377  		}
378  	}
379  
380  out_unlock:
381  	rcu_read_unlock();
382  	spin_unlock(&mm->ioctx_lock);
383  	return res;
384  }
385  
386  static const struct vm_operations_struct aio_ring_vm_ops = {
387  	.mremap		= aio_ring_mremap,
388  #if IS_ENABLED(CONFIG_MMU)
389  	.fault		= filemap_fault,
390  	.map_pages	= filemap_map_pages,
391  	.page_mkwrite	= filemap_page_mkwrite,
392  #endif
393  };
394  
395  static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
396  {
397  	vm_flags_set(vma, VM_DONTEXPAND);
398  	vma->vm_ops = &aio_ring_vm_ops;
399  	return 0;
400  }
401  
402  static const struct file_operations aio_ring_fops = {
403  	.mmap = aio_ring_mmap,
404  };
405  
406  #if IS_ENABLED(CONFIG_MIGRATION)
407  static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
408  			struct folio *src, enum migrate_mode mode)
409  {
410  	struct kioctx *ctx;
411  	unsigned long flags;
412  	pgoff_t idx;
413  	int rc;
414  
415  	/*
416  	 * We cannot support the _NO_COPY case here, because copy needs to
417  	 * happen under the ctx->completion_lock. That does not work with the
418  	 * migration workflow of MIGRATE_SYNC_NO_COPY.
419  	 */
420  	if (mode == MIGRATE_SYNC_NO_COPY)
421  		return -EINVAL;
422  
423  	rc = 0;
424  
425  	/* mapping->private_lock here protects against the kioctx teardown.  */
426  	spin_lock(&mapping->private_lock);
427  	ctx = mapping->private_data;
428  	if (!ctx) {
429  		rc = -EINVAL;
430  		goto out;
431  	}
432  
433  	/* Take the ring_lock mutex.  This prevents aio_read_events() from
434  	 * writing to the ring's head, and prevents page migration from mucking
435  	 * with a partially initialized kioctx.
436  	 */
437  	if (!mutex_trylock(&ctx->ring_lock)) {
438  		rc = -EAGAIN;
439  		goto out;
440  	}
441  
442  	idx = src->index;
443  	if (idx < (pgoff_t)ctx->nr_pages) {
444  		/* Make sure the old folio hasn't already been changed */
445  		if (ctx->ring_pages[idx] != &src->page)
446  			rc = -EAGAIN;
447  	} else
448  		rc = -EINVAL;
449  
450  	if (rc != 0)
451  		goto out_unlock;
452  
453  	/* Writeback must be complete */
454  	BUG_ON(folio_test_writeback(src));
455  	folio_get(dst);
456  
457  	rc = folio_migrate_mapping(mapping, dst, src, 1);
458  	if (rc != MIGRATEPAGE_SUCCESS) {
459  		folio_put(dst);
460  		goto out_unlock;
461  	}
462  
463  	/* Take completion_lock to prevent other writes to the ring buffer
464  	 * while the old folio is copied to the new.  This prevents new
465  	 * events from being lost.
466  	 */
467  	spin_lock_irqsave(&ctx->completion_lock, flags);
468  	folio_migrate_copy(dst, src);
469  	BUG_ON(ctx->ring_pages[idx] != &src->page);
470  	ctx->ring_pages[idx] = &dst->page;
471  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
472  
473  	/* The old folio is no longer accessible. */
474  	folio_put(src);
475  
476  out_unlock:
477  	mutex_unlock(&ctx->ring_lock);
478  out:
479  	spin_unlock(&mapping->private_lock);
480  	return rc;
481  }
482  #else
483  #define aio_migrate_folio NULL
484  #endif
485  
486  static const struct address_space_operations aio_ctx_aops = {
487  	.dirty_folio	= noop_dirty_folio,
488  	.migrate_folio	= aio_migrate_folio,
489  };
490  
491  static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
492  {
493  	struct aio_ring *ring;
494  	struct mm_struct *mm = current->mm;
495  	unsigned long size, unused;
496  	int nr_pages;
497  	int i;
498  	struct file *file;
499  
500  	/* Compensate for the ring buffer's head/tail overlap entry */
501  	nr_events += 2;	/* 1 is required, 2 for good luck */
502  
503  	size = sizeof(struct aio_ring);
504  	size += sizeof(struct io_event) * nr_events;
505  
506  	nr_pages = PFN_UP(size);
507  	if (nr_pages < 0)
508  		return -EINVAL;
509  
510  	file = aio_private_file(ctx, nr_pages);
511  	if (IS_ERR(file)) {
512  		ctx->aio_ring_file = NULL;
513  		return -ENOMEM;
514  	}
515  
516  	ctx->aio_ring_file = file;
517  	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
518  			/ sizeof(struct io_event);
519  
520  	ctx->ring_pages = ctx->internal_pages;
521  	if (nr_pages > AIO_RING_PAGES) {
522  		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
523  					  GFP_KERNEL);
524  		if (!ctx->ring_pages) {
525  			put_aio_ring_file(ctx);
526  			return -ENOMEM;
527  		}
528  	}
529  
530  	for (i = 0; i < nr_pages; i++) {
531  		struct page *page;
532  		page = find_or_create_page(file->f_mapping,
533  					   i, GFP_USER | __GFP_ZERO);
534  		if (!page)
535  			break;
536  		pr_debug("pid(%d) page[%d]->count=%d\n",
537  			 current->pid, i, page_count(page));
538  		SetPageUptodate(page);
539  		unlock_page(page);
540  
541  		ctx->ring_pages[i] = page;
542  	}
543  	ctx->nr_pages = i;
544  
545  	if (unlikely(i != nr_pages)) {
546  		aio_free_ring(ctx);
547  		return -ENOMEM;
548  	}
549  
550  	ctx->mmap_size = nr_pages * PAGE_SIZE;
551  	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
552  
553  	if (mmap_write_lock_killable(mm)) {
554  		ctx->mmap_size = 0;
555  		aio_free_ring(ctx);
556  		return -EINTR;
557  	}
558  
559  	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
560  				 PROT_READ | PROT_WRITE,
561  				 MAP_SHARED, 0, &unused, NULL);
562  	mmap_write_unlock(mm);
563  	if (IS_ERR((void *)ctx->mmap_base)) {
564  		ctx->mmap_size = 0;
565  		aio_free_ring(ctx);
566  		return -ENOMEM;
567  	}
568  
569  	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
570  
571  	ctx->user_id = ctx->mmap_base;
572  	ctx->nr_events = nr_events; /* trusted copy */
573  
574  	ring = page_address(ctx->ring_pages[0]);
575  	ring->nr = nr_events;	/* user copy */
576  	ring->id = ~0U;
577  	ring->head = ring->tail = 0;
578  	ring->magic = AIO_RING_MAGIC;
579  	ring->compat_features = AIO_RING_COMPAT_FEATURES;
580  	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
581  	ring->header_length = sizeof(struct aio_ring);
582  	flush_dcache_page(ctx->ring_pages[0]);
583  
584  	return 0;
585  }
586  
587  #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
588  #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
589  #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
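
/*
 * Worked example, assuming 4 KiB pages (struct io_event is 32 bytes, and
 * the struct aio_ring header happens to be 32 bytes as well):
 * AIO_EVENTS_PER_PAGE = 128 and AIO_EVENTS_FIRST_PAGE = 127, since the
 * header eats one event-sized slot of page 0, so AIO_EVENTS_OFFSET = 1.
 * Event index idx then lives at:
 *
 *	pos  = idx + AIO_EVENTS_OFFSET;			// idx 127 -> pos 128
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];	// -> page 1
 *	slot = pos % AIO_EVENTS_PER_PAGE;			// -> slot 0
 */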
590  
591  void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
592  {
593  	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
594  	struct kioctx *ctx = req->ki_ctx;
595  	unsigned long flags;
596  
597  	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
598  		return;
599  
600  	spin_lock_irqsave(&ctx->ctx_lock, flags);
601  	list_add_tail(&req->ki_list, &ctx->active_reqs);
602  	req->ki_cancel = cancel;
603  	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
604  }
605  EXPORT_SYMBOL(kiocb_set_cancel_fn);
606  
607  /*
608   * free_ioctx() should be RCU delayed to synchronize against the RCU
609   * protected lookup_ioctx() and also needs process context to call
610   * aio_free_ring().  Use rcu_work.
611   */
612  static void free_ioctx(struct work_struct *work)
613  {
614  	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
615  					  free_rwork);
616  	pr_debug("freeing %p\n", ctx);
617  
618  	aio_free_ring(ctx);
619  	free_percpu(ctx->cpu);
620  	percpu_ref_exit(&ctx->reqs);
621  	percpu_ref_exit(&ctx->users);
622  	kmem_cache_free(kioctx_cachep, ctx);
623  }
624  
625  static void free_ioctx_reqs(struct percpu_ref *ref)
626  {
627  	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
628  
629  	/* At this point we know that there are no in-flight requests */
630  	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
631  		complete(&ctx->rq_wait->comp);
632  
633  	/* Synchronize against RCU protected table->table[] dereferences */
634  	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
635  	queue_rcu_work(system_wq, &ctx->free_rwork);
636  }
637  
638  /*
639   * When this function runs, the kioctx has been removed from the "hash table"
640   * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
641   * now it's safe to cancel any that need to be.
642   */
643  static void free_ioctx_users(struct percpu_ref *ref)
644  {
645  	struct kioctx *ctx = container_of(ref, struct kioctx, users);
646  	struct aio_kiocb *req;
647  
648  	spin_lock_irq(&ctx->ctx_lock);
649  
650  	while (!list_empty(&ctx->active_reqs)) {
651  		req = list_first_entry(&ctx->active_reqs,
652  				       struct aio_kiocb, ki_list);
653  		req->ki_cancel(&req->rw);
654  		list_del_init(&req->ki_list);
655  	}
656  
657  	spin_unlock_irq(&ctx->ctx_lock);
658  
659  	percpu_ref_kill(&ctx->reqs);
660  	percpu_ref_put(&ctx->reqs);
661  }
662  
663  static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
664  {
665  	unsigned i, new_nr;
666  	struct kioctx_table *table, *old;
667  	struct aio_ring *ring;
668  
669  	spin_lock(&mm->ioctx_lock);
670  	table = rcu_dereference_raw(mm->ioctx_table);
671  
672  	while (1) {
673  		if (table)
674  			for (i = 0; i < table->nr; i++)
675  				if (!rcu_access_pointer(table->table[i])) {
676  					ctx->id = i;
677  					rcu_assign_pointer(table->table[i], ctx);
678  					spin_unlock(&mm->ioctx_lock);
679  
680  					/* While kioctx setup is in progress,
681  					 * we are protected from page migration
682  					 * changing ring_pages by ->ring_lock.
683  					 */
684  					ring = page_address(ctx->ring_pages[0]);
685  					ring->id = ctx->id;
686  					return 0;
687  				}
688  
689  		new_nr = (table ? table->nr : 1) * 4;
690  		spin_unlock(&mm->ioctx_lock);
691  
692  		table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
693  		if (!table)
694  			return -ENOMEM;
695  
696  		table->nr = new_nr;
697  
698  		spin_lock(&mm->ioctx_lock);
699  		old = rcu_dereference_raw(mm->ioctx_table);
700  
701  		if (!old) {
702  			rcu_assign_pointer(mm->ioctx_table, table);
703  		} else if (table->nr > old->nr) {
704  			memcpy(table->table, old->table,
705  			       old->nr * sizeof(struct kioctx *));
706  
707  			rcu_assign_pointer(mm->ioctx_table, table);
708  			kfree_rcu(old, rcu);
709  		} else {
710  			kfree(table);
711  			table = old;
712  		}
713  	}
714  }
715  
716  static void aio_nr_sub(unsigned nr)
717  {
718  	spin_lock(&aio_nr_lock);
719  	if (WARN_ON(aio_nr - nr > aio_nr))
720  		aio_nr = 0;
721  	else
722  		aio_nr -= nr;
723  	spin_unlock(&aio_nr_lock);
724  }
725  
726  /* ioctx_alloc
727   *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
728   */
729  static struct kioctx *ioctx_alloc(unsigned nr_events)
730  {
731  	struct mm_struct *mm = current->mm;
732  	struct kioctx *ctx;
733  	int err = -ENOMEM;
734  
735  	/*
736  	 * Store the original nr_events -- what userspace passed to io_setup(),
737  	 * for counting against the global limit -- before it changes.
738  	 */
739  	unsigned int max_reqs = nr_events;
740  
741  	/*
742  	 * We keep track of the number of available ringbuffer slots, to prevent
743  	 * overflow (reqs_available), and we also use percpu counters for this.
744  	 *
745  	 * So since up to half the slots might be on other CPUs' percpu counters
746  	 * and unavailable, double nr_events so userspace sees what it
747  	 * expected: additionally, we move req_batch slots to/from percpu
748  	 * counters at a time, so make sure that isn't 0:
749  	 */
750  	nr_events = max(nr_events, num_possible_cpus() * 4);
751  	nr_events *= 2;
752  
753  	/* Prevent overflows */
754  	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
755  		pr_debug("ENOMEM: nr_events too high\n");
756  		return ERR_PTR(-EINVAL);
757  	}
758  
759  	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
760  		return ERR_PTR(-EAGAIN);
761  
762  	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
763  	if (!ctx)
764  		return ERR_PTR(-ENOMEM);
765  
766  	ctx->max_reqs = max_reqs;
767  
768  	spin_lock_init(&ctx->ctx_lock);
769  	spin_lock_init(&ctx->completion_lock);
770  	mutex_init(&ctx->ring_lock);
771  	/* Protect against page migration throughout kioctx setup by keeping
772  	 * the ring_lock mutex held until setup is complete. */
773  	mutex_lock(&ctx->ring_lock);
774  	init_waitqueue_head(&ctx->wait);
775  
776  	INIT_LIST_HEAD(&ctx->active_reqs);
777  
778  	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
779  		goto err;
780  
781  	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
782  		goto err;
783  
784  	ctx->cpu = alloc_percpu(struct kioctx_cpu);
785  	if (!ctx->cpu)
786  		goto err;
787  
788  	err = aio_setup_ring(ctx, nr_events);
789  	if (err < 0)
790  		goto err;
791  
792  	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
793  	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
794  	if (ctx->req_batch < 1)
795  		ctx->req_batch = 1;
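
	/*
	 * Worked example, assuming 4 KiB pages and 4 possible CPUs: a user
	 * asking for 128 events gives nr_events = max(128, 16) * 2 = 256;
	 * aio_setup_ring() adds 2, rounds up to 3 pages, and recomputes
	 * nr_events = (3 * 4096 - 32) / 32 = 383, so reqs_available starts
	 * at 382 and req_batch = 382 / 16 = 23.
	 */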
796  
797  	/* limit the number of system wide aios */
798  	spin_lock(&aio_nr_lock);
799  	if (aio_nr + ctx->max_reqs > aio_max_nr ||
800  	    aio_nr + ctx->max_reqs < aio_nr) {
801  		spin_unlock(&aio_nr_lock);
802  		err = -EAGAIN;
803  		goto err_ctx;
804  	}
805  	aio_nr += ctx->max_reqs;
806  	spin_unlock(&aio_nr_lock);
807  
808  	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
809  	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
810  
811  	err = ioctx_add_table(ctx, mm);
812  	if (err)
813  		goto err_cleanup;
814  
815  	/* Release the ring_lock mutex now that all setup is complete. */
816  	mutex_unlock(&ctx->ring_lock);
817  
818  	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
819  		 ctx, ctx->user_id, mm, ctx->nr_events);
820  	return ctx;
821  
822  err_cleanup:
823  	aio_nr_sub(ctx->max_reqs);
824  err_ctx:
825  	atomic_set(&ctx->dead, 1);
826  	if (ctx->mmap_size)
827  		vm_munmap(ctx->mmap_base, ctx->mmap_size);
828  	aio_free_ring(ctx);
829  err:
830  	mutex_unlock(&ctx->ring_lock);
831  	free_percpu(ctx->cpu);
832  	percpu_ref_exit(&ctx->reqs);
833  	percpu_ref_exit(&ctx->users);
834  	kmem_cache_free(kioctx_cachep, ctx);
835  	pr_debug("error allocating ioctx %d\n", err);
836  	return ERR_PTR(err);
837  }
838  
839  /* kill_ioctx
840   *	Cancels all outstanding aio requests on an aio context.  Used
841   *	when the processes owning a context have all exited to encourage
842   *	the rapid destruction of the kioctx.
843   */
844  static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
845  		      struct ctx_rq_wait *wait)
846  {
847  	struct kioctx_table *table;
848  
849  	spin_lock(&mm->ioctx_lock);
850  	if (atomic_xchg(&ctx->dead, 1)) {
851  		spin_unlock(&mm->ioctx_lock);
852  		return -EINVAL;
853  	}
854  
855  	table = rcu_dereference_raw(mm->ioctx_table);
856  	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
857  	RCU_INIT_POINTER(table->table[ctx->id], NULL);
858  	spin_unlock(&mm->ioctx_lock);
859  
860  	/* free_ioctx_reqs() will do the necessary RCU synchronization */
861  	wake_up_all(&ctx->wait);
862  
863  	/*
864  	 * It'd be more correct to do this in free_ioctx(), after all
865  	 * the outstanding kiocbs have finished - but by then io_destroy
866  	 * has already returned, so io_setup() could potentially return
867  	 * -EAGAIN with no ioctxs actually in use (as far as userspace
868  	 *  could tell).
869  	 */
870  	aio_nr_sub(ctx->max_reqs);
871  
872  	if (ctx->mmap_size)
873  		vm_munmap(ctx->mmap_base, ctx->mmap_size);
874  
875  	ctx->rq_wait = wait;
876  	percpu_ref_kill(&ctx->users);
877  	return 0;
878  }
879  
880  /*
881   * exit_aio: called when the last user of mm goes away.  At this point, there is
882   * no way for any new requests to be submitted or any of the io_* syscalls to be
883   * called on the context.
884   *
885   * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
886   * them.
887   */
888  void exit_aio(struct mm_struct *mm)
889  {
890  	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
891  	struct ctx_rq_wait wait;
892  	int i, skipped;
893  
894  	if (!table)
895  		return;
896  
897  	atomic_set(&wait.count, table->nr);
898  	init_completion(&wait.comp);
899  
900  	skipped = 0;
901  	for (i = 0; i < table->nr; ++i) {
902  		struct kioctx *ctx =
903  			rcu_dereference_protected(table->table[i], true);
904  
905  		if (!ctx) {
906  			skipped++;
907  			continue;
908  		}
909  
910  		/*
911  		 * We don't need to bother with munmap() here - exit_mmap(mm)
912  		 * is coming and it'll unmap everything. And we simply can't,
913  		 * this is not necessarily our ->mm.
914  		 * Since kill_ioctx() uses non-zero ->mmap_size as an indicator
915  		 * that it needs to unmap the area, just set it to 0.
916  		 */
917  		ctx->mmap_size = 0;
918  		kill_ioctx(mm, ctx, &wait);
919  	}
920  
921  	if (!atomic_sub_and_test(skipped, &wait.count)) {
922  		/* Wait until all IO for the context is done. */
923  		wait_for_completion(&wait.comp);
924  	}
925  
926  	RCU_INIT_POINTER(mm->ioctx_table, NULL);
927  	kfree(table);
928  }
929  
930  static void put_reqs_available(struct kioctx *ctx, unsigned nr)
931  {
932  	struct kioctx_cpu *kcpu;
933  	unsigned long flags;
934  
935  	local_irq_save(flags);
936  	kcpu = this_cpu_ptr(ctx->cpu);
937  	kcpu->reqs_available += nr;
938  
939  	while (kcpu->reqs_available >= ctx->req_batch * 2) {
940  		kcpu->reqs_available -= ctx->req_batch;
941  		atomic_add(ctx->req_batch, &ctx->reqs_available);
942  	}
943  
944  	local_irq_restore(flags);
945  }
946  
947  static bool __get_reqs_available(struct kioctx *ctx)
948  {
949  	struct kioctx_cpu *kcpu;
950  	bool ret = false;
951  	unsigned long flags;
952  
953  	local_irq_save(flags);
954  	kcpu = this_cpu_ptr(ctx->cpu);
955  	if (!kcpu->reqs_available) {
956  		int avail = atomic_read(&ctx->reqs_available);
957  
958  		do {
959  			if (avail < ctx->req_batch)
960  				goto out;
961  		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
962  					     &avail, avail - ctx->req_batch));
963  
964  		kcpu->reqs_available += ctx->req_batch;
965  	}
966  
967  	ret = true;
968  	kcpu->reqs_available--;
969  out:
970  	local_irq_restore(flags);
971  	return ret;
972  }
973  
974  /* refill_reqs_available
975   *	Updates the reqs_available reference counts used for tracking the
976   *	number of free slots in the completion ring.  This can be called
977   *	from aio_complete() (to optimistically update reqs_available) or
978   *	from aio_get_req() (the we're out of events case).  It must be
979   *	called holding ctx->completion_lock.
980   */
981  static void refill_reqs_available(struct kioctx *ctx, unsigned head,
982                                    unsigned tail)
983  {
984  	unsigned events_in_ring, completed;
985  
986  	/* Clamp head since userland can write to it. */
987  	head %= ctx->nr_events;
988  	if (head <= tail)
989  		events_in_ring = tail - head;
990  	else
991  		events_in_ring = ctx->nr_events - (head - tail);
992  
993  	completed = ctx->completed_events;
994  	if (events_in_ring < completed)
995  		completed -= events_in_ring;
996  	else
997  		completed = 0;
998  
999  	if (!completed)
1000  		return;
1001  
1002  	ctx->completed_events -= completed;
1003  	put_reqs_available(ctx, completed);
1004  }
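
/*
 * Worked example: with nr_events = 128, head = 120 and tail = 10, the ring
 * currently holds events_in_ring = 128 - (120 - 10) = 18 events.  If
 * ctx->completed_events is 25, then 25 - 18 = 7 events are known to have
 * been consumed by userspace, so 7 slots are handed back through
 * put_reqs_available().
 */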
1005  
1006  /* user_refill_reqs_available
1007   *	Called to refill reqs_available when aio_get_req() encounters an
1008   *	out-of-space condition in the completion ring.
1009   */
1010  static void user_refill_reqs_available(struct kioctx *ctx)
1011  {
1012  	spin_lock_irq(&ctx->completion_lock);
1013  	if (ctx->completed_events) {
1014  		struct aio_ring *ring;
1015  		unsigned head;
1016  
1017  		/* Access of ring->head may race with aio_read_events_ring()
1018  		 * here, but that's okay: whether we read the old version
1019  		 * or the new version, either will be valid.  The important
1020  		 * part is that head cannot pass tail since we prevent
1021  		 * aio_complete() from updating tail by holding
1022  		 * ctx->completion_lock.  Even if head is invalid, the check
1023  		 * against ctx->completed_events below will make sure we do the
1024  		 * safe/right thing.
1025  		 */
1026  		ring = page_address(ctx->ring_pages[0]);
1027  		head = ring->head;
1028  
1029  		refill_reqs_available(ctx, head, ctx->tail);
1030  	}
1031  
1032  	spin_unlock_irq(&ctx->completion_lock);
1033  }
1034  
1035  static bool get_reqs_available(struct kioctx *ctx)
1036  {
1037  	if (__get_reqs_available(ctx))
1038  		return true;
1039  	user_refill_reqs_available(ctx);
1040  	return __get_reqs_available(ctx);
1041  }
1042  
1043  /* aio_get_req
1044   *	Allocate a slot for an aio request.
1045   * Returns NULL if no requests are free.
1046   *
1047   * The refcount is initialized to 2 - one for the async op completion,
1048   * one for the synchronous code that does this.
1049   */
1050  static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1051  {
1052  	struct aio_kiocb *req;
1053  
1054  	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1055  	if (unlikely(!req))
1056  		return NULL;
1057  
1058  	if (unlikely(!get_reqs_available(ctx))) {
1059  		kmem_cache_free(kiocb_cachep, req);
1060  		return NULL;
1061  	}
1062  
1063  	percpu_ref_get(&ctx->reqs);
1064  	req->ki_ctx = ctx;
1065  	INIT_LIST_HEAD(&req->ki_list);
1066  	refcount_set(&req->ki_refcnt, 2);
1067  	req->ki_eventfd = NULL;
1068  	return req;
1069  }
1070  
1071  static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1072  {
1073  	struct aio_ring __user *ring  = (void __user *)ctx_id;
1074  	struct mm_struct *mm = current->mm;
1075  	struct kioctx *ctx, *ret = NULL;
1076  	struct kioctx_table *table;
1077  	unsigned id;
1078  
1079  	if (get_user(id, &ring->id))
1080  		return NULL;
1081  
1082  	rcu_read_lock();
1083  	table = rcu_dereference(mm->ioctx_table);
1084  
1085  	if (!table || id >= table->nr)
1086  		goto out;
1087  
1088  	id = array_index_nospec(id, table->nr);
1089  	ctx = rcu_dereference(table->table[id]);
1090  	if (ctx && ctx->user_id == ctx_id) {
1091  		if (percpu_ref_tryget_live(&ctx->users))
1092  			ret = ctx;
1093  	}
1094  out:
1095  	rcu_read_unlock();
1096  	return ret;
1097  }
1098  
1099  static inline void iocb_destroy(struct aio_kiocb *iocb)
1100  {
1101  	if (iocb->ki_eventfd)
1102  		eventfd_ctx_put(iocb->ki_eventfd);
1103  	if (iocb->ki_filp)
1104  		fput(iocb->ki_filp);
1105  	percpu_ref_put(&iocb->ki_ctx->reqs);
1106  	kmem_cache_free(kiocb_cachep, iocb);
1107  }
1108  
1109  /* aio_complete
1110   *	Called when the io request on the given iocb is complete.
1111   */
1112  static void aio_complete(struct aio_kiocb *iocb)
1113  {
1114  	struct kioctx	*ctx = iocb->ki_ctx;
1115  	struct aio_ring	*ring;
1116  	struct io_event	*ev_page, *event;
1117  	unsigned tail, pos, head;
1118  	unsigned long	flags;
1119  
1120  	/*
1121  	 * Add a completion event to the ring buffer. Must be done holding
1122  	 * ctx->completion_lock to prevent other code from messing with the tail
1123  	 * pointer since we might be called from irq context.
1124  	 */
1125  	spin_lock_irqsave(&ctx->completion_lock, flags);
1126  
1127  	tail = ctx->tail;
1128  	pos = tail + AIO_EVENTS_OFFSET;
1129  
1130  	if (++tail >= ctx->nr_events)
1131  		tail = 0;
1132  
1133  	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1134  	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1135  
1136  	*event = iocb->ki_res;
1137  
1138  	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1139  
1140  	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1141  		 (void __user *)(unsigned long)iocb->ki_res.obj,
1142  		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1143  
1144  	/* after flagging the request as done, we
1145  	 * must never even look at it again
1146  	 */
1147  	smp_wmb();	/* make event visible before updating tail */
1148  
1149  	ctx->tail = tail;
1150  
1151  	ring = page_address(ctx->ring_pages[0]);
1152  	head = ring->head;
1153  	ring->tail = tail;
1154  	flush_dcache_page(ctx->ring_pages[0]);
1155  
1156  	ctx->completed_events++;
1157  	if (ctx->completed_events > 1)
1158  		refill_reqs_available(ctx, head, tail);
1159  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1160  
1161  	pr_debug("added to ring %p at [%u]\n", iocb, tail);
1162  
1163  	/*
1164  	 * Check if the user asked us to deliver the result through an
1165  	 * eventfd. The eventfd_signal() function is safe to be called
1166  	 * from IRQ context.
1167  	 */
1168  	if (iocb->ki_eventfd)
1169  		eventfd_signal(iocb->ki_eventfd, 1);
1170  
1171  	/*
1172  	 * We have to order our ring_info tail store above and test
1173  	 * of the wait list below outside the wait lock.  This is
1174  	 * like in wake_up_bit() where clearing a bit has to be
1175  	 * ordered with the unlocked test.
1176  	 */
1177  	smp_mb();
1178  
1179  	if (waitqueue_active(&ctx->wait))
1180  		wake_up(&ctx->wait);
1181  }
1182  
1183  static inline void iocb_put(struct aio_kiocb *iocb)
1184  {
1185  	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1186  		aio_complete(iocb);
1187  		iocb_destroy(iocb);
1188  	}
1189  }
1190  
1191  /* aio_read_events_ring
1192   *	Pull events off the ioctx's event ring.  Returns the number of
1193   *	events fetched.
1194   */
1195  static long aio_read_events_ring(struct kioctx *ctx,
1196  				 struct io_event __user *event, long nr)
1197  {
1198  	struct aio_ring *ring;
1199  	unsigned head, tail, pos;
1200  	long ret = 0;
1201  	int copy_ret;
1202  
1203  	/*
1204  	 * The mutex can block and wake us up and that will cause
1205  	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1206  	 * and repeat. This should be rare enough that it doesn't cause
1207   * performance issues. See the comment in read_events() for more detail.
1208  	 */
1209  	sched_annotate_sleep();
1210  	mutex_lock(&ctx->ring_lock);
1211  
1212  	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
1213  	ring = page_address(ctx->ring_pages[0]);
1214  	head = ring->head;
1215  	tail = ring->tail;
1216  
1217  	/*
1218  	 * Ensure that once we've read the current tail pointer, that
1219  	 * we also see the events that were stored up to the tail.
1220  	 */
1221  	smp_rmb();
1222  
1223  	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1224  
1225  	if (head == tail)
1226  		goto out;
1227  
1228  	head %= ctx->nr_events;
1229  	tail %= ctx->nr_events;
1230  
1231  	while (ret < nr) {
1232  		long avail;
1233  		struct io_event *ev;
1234  		struct page *page;
1235  
1236  		avail = (head <= tail ?  tail : ctx->nr_events) - head;
1237  		if (head == tail)
1238  			break;
1239  
1240  		pos = head + AIO_EVENTS_OFFSET;
1241  		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1242  		pos %= AIO_EVENTS_PER_PAGE;
1243  
1244  		avail = min(avail, nr - ret);
1245  		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1246  
1247  		ev = page_address(page);
1248  		copy_ret = copy_to_user(event + ret, ev + pos,
1249  					sizeof(*ev) * avail);
1250  
1251  		if (unlikely(copy_ret)) {
1252  			ret = -EFAULT;
1253  			goto out;
1254  		}
1255  
1256  		ret += avail;
1257  		head += avail;
1258  		head %= ctx->nr_events;
1259  	}
1260  
1261  	ring = page_address(ctx->ring_pages[0]);
1262  	ring->head = head;
1263  	flush_dcache_page(ctx->ring_pages[0]);
1264  
1265  	pr_debug("%li  h%u t%u\n", ret, head, tail);
1266  out:
1267  	mutex_unlock(&ctx->ring_lock);
1268  
1269  	return ret;
1270  }
1271  
1272  static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1273  			    struct io_event __user *event, long *i)
1274  {
1275  	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1276  
1277  	if (ret > 0)
1278  		*i += ret;
1279  
1280  	if (unlikely(atomic_read(&ctx->dead)))
1281  		ret = -EINVAL;
1282  
1283  	if (!*i)
1284  		*i = ret;
1285  
1286  	return ret < 0 || *i >= min_nr;
1287  }
1288  
1289  static long read_events(struct kioctx *ctx, long min_nr, long nr,
1290  			struct io_event __user *event,
1291  			ktime_t until)
1292  {
1293  	long ret = 0;
1294  
1295  	/*
1296  	 * Note that aio_read_events() is being called as the conditional - i.e.
1297  	 * we're calling it after prepare_to_wait() has set task state to
1298  	 * TASK_INTERRUPTIBLE.
1299  	 *
1300  	 * But aio_read_events() can block, and if it blocks it's going to flip
1301  	 * the task state back to TASK_RUNNING.
1302  	 *
1303  	 * This should be ok, provided it doesn't flip the state back to
1304  	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1305  	 * will only happen if the mutex_lock() call blocks, and we then find
1306  	 * the ringbuffer empty. So in practice we should be ok, but it's
1307  	 * something to be aware of when touching this code.
1308  	 */
1309  	if (until == 0)
1310  		aio_read_events(ctx, min_nr, nr, event, &ret);
1311  	else
1312  		wait_event_interruptible_hrtimeout(ctx->wait,
1313  				aio_read_events(ctx, min_nr, nr, event, &ret),
1314  				until);
1315  	return ret;
1316  }
1317  
1318  /* sys_io_setup:
1319   *	Create an aio_context capable of receiving at least nr_events.
1320   *	ctxp must not point to an aio_context that already exists, and
1321   *	must be initialized to 0 prior to the call.  On successful
1322   *	creation of the aio_context, *ctxp is filled in with the resulting
1323   *	handle.  May fail with -EINVAL if *ctxp is not initialized,
1324   *	or if the specified nr_events exceeds internal limits.  May fail
1325   *	with -EAGAIN if the specified nr_events exceeds the user's limit
1326   *	of available events.  May fail with -ENOMEM if insufficient kernel
1327   *	resources are available.  May fail with -EFAULT if an invalid
1328   *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
1329   *	implemented.
1330   */
1331  SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1332  {
1333  	struct kioctx *ioctx = NULL;
1334  	unsigned long ctx;
1335  	long ret;
1336  
1337  	ret = get_user(ctx, ctxp);
1338  	if (unlikely(ret))
1339  		goto out;
1340  
1341  	ret = -EINVAL;
1342  	if (unlikely(ctx || nr_events == 0)) {
1343  		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1344  		         ctx, nr_events);
1345  		goto out;
1346  	}
1347  
1348  	ioctx = ioctx_alloc(nr_events);
1349  	ret = PTR_ERR(ioctx);
1350  	if (!IS_ERR(ioctx)) {
1351  		ret = put_user(ioctx->user_id, ctxp);
1352  		if (ret)
1353  			kill_ioctx(current->mm, ioctx, NULL);
1354  		percpu_ref_put(&ioctx->users);
1355  	}
1356  
1357  out:
1358  	return ret;
1359  }
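
/*
 * A hedged userspace sketch of the contract documented above, using raw
 * syscalls (libaio wraps these as io_setup()/io_destroy()):
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;		// must start out zeroed
 *
 *	if (syscall(SYS_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *
 *	// ... submit and reap I/O against ctx ...
 *
 *	syscall(SYS_io_destroy, ctx);
 */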
1360  
1361  #ifdef CONFIG_COMPAT
1362  COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1363  {
1364  	struct kioctx *ioctx = NULL;
1365  	unsigned long ctx;
1366  	long ret;
1367  
1368  	ret = get_user(ctx, ctx32p);
1369  	if (unlikely(ret))
1370  		goto out;
1371  
1372  	ret = -EINVAL;
1373  	if (unlikely(ctx || nr_events == 0)) {
1374  		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1375  		         ctx, nr_events);
1376  		goto out;
1377  	}
1378  
1379  	ioctx = ioctx_alloc(nr_events);
1380  	ret = PTR_ERR(ioctx);
1381  	if (!IS_ERR(ioctx)) {
1382  		/* truncating is ok because it's a user address */
1383  		ret = put_user((u32)ioctx->user_id, ctx32p);
1384  		if (ret)
1385  			kill_ioctx(current->mm, ioctx, NULL);
1386  		percpu_ref_put(&ioctx->users);
1387  	}
1388  
1389  out:
1390  	return ret;
1391  }
1392  #endif
1393  
1394  /* sys_io_destroy:
1395   *	Destroy the aio_context specified.  May cancel any outstanding
1396   *	AIOs and block on completion.  Will fail with -ENOSYS if not
1397   *	implemented.  May fail with -EINVAL if the context pointed to
1398   *	is invalid.
1399   */
1400  SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1401  {
1402  	struct kioctx *ioctx = lookup_ioctx(ctx);
1403  	if (likely(ioctx)) {
1404  		struct ctx_rq_wait wait;
1405  		int ret;
1406  
1407  		init_completion(&wait.comp);
1408  		atomic_set(&wait.count, 1);
1409  
1410  		/* Pass the ctx_rq_wait structure to kill_ioctx() where it can
1411  		 * be set in a thread-safe way. If we try to set it here then we
1412  		 * have a race condition if two io_destroy() calls run simultaneously.
1413  		 */
1414  		ret = kill_ioctx(current->mm, ioctx, &wait);
1415  		percpu_ref_put(&ioctx->users);
1416  
1417  		/* Wait until all IO for the context is done. Otherwise the kernel
1418  		 * keeps using user-space buffers even if the user thinks the context
1419  		 * is destroyed.
1420  		 */
1421  		if (!ret)
1422  			wait_for_completion(&wait.comp);
1423  
1424  		return ret;
1425  	}
1426  	pr_debug("EINVAL: invalid context id\n");
1427  	return -EINVAL;
1428  }
1429  
1430  static void aio_remove_iocb(struct aio_kiocb *iocb)
1431  {
1432  	struct kioctx *ctx = iocb->ki_ctx;
1433  	unsigned long flags;
1434  
1435  	spin_lock_irqsave(&ctx->ctx_lock, flags);
1436  	list_del(&iocb->ki_list);
1437  	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1438  }
1439  
1440  static void aio_complete_rw(struct kiocb *kiocb, long res)
1441  {
1442  	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1443  
1444  	if (!list_empty_careful(&iocb->ki_list))
1445  		aio_remove_iocb(iocb);
1446  
1447  	if (kiocb->ki_flags & IOCB_WRITE) {
1448  		struct inode *inode = file_inode(kiocb->ki_filp);
1449  
1450  		/*
1451  		 * Tell lockdep we inherited freeze protection from the submission
1452  		 * thread.
1453  		 */
1454  		if (S_ISREG(inode->i_mode))
1455  			__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1456  		file_end_write(kiocb->ki_filp);
1457  	}
1458  
1459  	iocb->ki_res.res = res;
1460  	iocb->ki_res.res2 = 0;
1461  	iocb_put(iocb);
1462  }
1463  
1464  static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1465  {
1466  	int ret;
1467  
1468  	req->ki_complete = aio_complete_rw;
1469  	req->private = NULL;
1470  	req->ki_pos = iocb->aio_offset;
1471  	req->ki_flags = req->ki_filp->f_iocb_flags;
1472  	if (iocb->aio_flags & IOCB_FLAG_RESFD)
1473  		req->ki_flags |= IOCB_EVENTFD;
1474  	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1475  		/*
1476  		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1477  		 * aio_reqprio is interpreted as an I/O scheduling
1478  		 * class and priority.
1479  		 */
1480  		ret = ioprio_check_cap(iocb->aio_reqprio);
1481  		if (ret) {
1482  			pr_debug("aio ioprio check cap error: %d\n", ret);
1483  			return ret;
1484  		}
1485  
1486  		req->ki_ioprio = iocb->aio_reqprio;
1487  	} else
1488  		req->ki_ioprio = get_current_ioprio();
1489  
1490  	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1491  	if (unlikely(ret))
1492  		return ret;
1493  
1494  	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1495  	return 0;
1496  }
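
/*
 * For illustration, a hedged sketch of the submission-side counterpart of
 * the IOCB_FLAG_IOPRIO branch above (IOPRIO_PRIO_VALUE and IOPRIO_CLASS_BE
 * come from <linux/ioprio.h>):
 *
 *	cb.aio_flags   |= IOCB_FLAG_IOPRIO;
 *	cb.aio_reqprio  = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
 *
 * Without the flag, the request simply inherits the submitter's I/O
 * priority via get_current_ioprio().
 */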
1497  
1498  static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1499  		struct iovec **iovec, bool vectored, bool compat,
1500  		struct iov_iter *iter)
1501  {
1502  	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1503  	size_t len = iocb->aio_nbytes;
1504  
1505  	if (!vectored) {
1506  		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1507  		*iovec = NULL;
1508  		return ret;
1509  	}
1510  
1511  	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1512  }
1513  
1514  static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1515  {
1516  	switch (ret) {
1517  	case -EIOCBQUEUED:
1518  		break;
1519  	case -ERESTARTSYS:
1520  	case -ERESTARTNOINTR:
1521  	case -ERESTARTNOHAND:
1522  	case -ERESTART_RESTARTBLOCK:
1523  		/*
1524  		 * There's no easy way to restart the syscall since other AIOs
1525  		 * may already be running. Just fail this IO with EINTR.
1526  		 */
1527  		ret = -EINTR;
1528  		fallthrough;
1529  	default:
1530  		req->ki_complete(req, ret);
1531  	}
1532  }
1533  
1534  static int aio_read(struct kiocb *req, const struct iocb *iocb,
1535  			bool vectored, bool compat)
1536  {
1537  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1538  	struct iov_iter iter;
1539  	struct file *file;
1540  	int ret;
1541  
1542  	ret = aio_prep_rw(req, iocb);
1543  	if (ret)
1544  		return ret;
1545  	file = req->ki_filp;
1546  	if (unlikely(!(file->f_mode & FMODE_READ)))
1547  		return -EBADF;
1548  	if (unlikely(!file->f_op->read_iter))
1549  		return -EINVAL;
1550  
1551  	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
1552  	if (ret < 0)
1553  		return ret;
1554  	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1555  	if (!ret)
1556  		aio_rw_done(req, call_read_iter(file, req, &iter));
1557  	kfree(iovec);
1558  	return ret;
1559  }
1560  
1561  static int aio_write(struct kiocb *req, const struct iocb *iocb,
1562  			 bool vectored, bool compat)
1563  {
1564  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1565  	struct iov_iter iter;
1566  	struct file *file;
1567  	int ret;
1568  
1569  	ret = aio_prep_rw(req, iocb);
1570  	if (ret)
1571  		return ret;
1572  	file = req->ki_filp;
1573  
1574  	if (unlikely(!(file->f_mode & FMODE_WRITE)))
1575  		return -EBADF;
1576  	if (unlikely(!file->f_op->write_iter))
1577  		return -EINVAL;
1578  
1579  	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
1580  	if (ret < 0)
1581  		return ret;
1582  	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1583  	if (!ret) {
1584  		/*
1585  		 * Open-code file_start_write here to grab freeze protection,
1586  		 * which will be released by another thread in
1587  		 * aio_complete_rw().  Fool lockdep by telling it the lock got
1588  		 * released so that it doesn't complain about the held lock when
1589  		 * we return to userspace.
1590  		 */
1591  		if (S_ISREG(file_inode(file)->i_mode)) {
1592  			sb_start_write(file_inode(file)->i_sb);
1593  			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1594  		}
1595  		req->ki_flags |= IOCB_WRITE;
1596  		aio_rw_done(req, call_write_iter(file, req, &iter));
1597  	}
1598  	kfree(iovec);
1599  	return ret;
1600  }
1601  
1602  static void aio_fsync_work(struct work_struct *work)
1603  {
1604  	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1605  	const struct cred *old_cred = override_creds(iocb->fsync.creds);
1606  
1607  	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1608  	revert_creds(old_cred);
1609  	put_cred(iocb->fsync.creds);
1610  	iocb_put(iocb);
1611  }
1612  
1613  static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1614  		     bool datasync)
1615  {
1616  	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1617  			iocb->aio_rw_flags))
1618  		return -EINVAL;
1619  
1620  	if (unlikely(!req->file->f_op->fsync))
1621  		return -EINVAL;
1622  
1623  	req->creds = prepare_creds();
1624  	if (!req->creds)
1625  		return -ENOMEM;
1626  
1627  	req->datasync = datasync;
1628  	INIT_WORK(&req->work, aio_fsync_work);
1629  	schedule_work(&req->work);
1630  	return 0;
1631  }
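
/*
 * A hedged userspace sketch of submitting an fsync through this path
 * (IOCB_CMD_FSYNC, or IOCB_CMD_FDSYNC for the datasync variant; note the
 * checks above reject any nonzero data-transfer fields; same includes as
 * the io_setup() sketch earlier):
 *
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_FSYNC,
 *		.aio_fildes	= fd,
 *	};
 *	struct iocb *cbs[] = { &cb };
 *
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 */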
1632  
1633  static void aio_poll_put_work(struct work_struct *work)
1634  {
1635  	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1636  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1637  
1638  	iocb_put(iocb);
1639  }
1640  
1641  /*
1642   * Safely lock the waitqueue which the request is on, synchronizing with the
1643   * case where the ->poll() provider decides to free its waitqueue early.
1644   *
1645   * Returns true on success, meaning that req->head->lock was locked, req->wait
1646   * is on req->head, and an RCU read lock was taken.  Returns false if the
1647   * request was already removed from its waitqueue (which might no longer exist).
1648   */
1649  static bool poll_iocb_lock_wq(struct poll_iocb *req)
1650  {
1651  	wait_queue_head_t *head;
1652  
1653  	/*
1654  	 * While we hold the waitqueue lock and the waitqueue is nonempty,
1655  	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
1656  	 * lock in the first place can race with the waitqueue being freed.
1657  	 *
1658  	 * We solve this as eventpoll does: by taking advantage of the fact that
1659  	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
1660  	 * we enter rcu_read_lock() and see that the pointer to the queue is
1661  	 * non-NULL, we can then lock it without the memory being freed out from
1662  	 * under us, then check whether the request is still on the queue.
1663  	 *
1664  	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1665  	 * case the caller deletes the entry from the queue, leaving it empty.
1666  	 * In that case, only RCU prevents the queue memory from being freed.
1667  	 */
1668  	rcu_read_lock();
1669  	head = smp_load_acquire(&req->head);
1670  	if (head) {
1671  		spin_lock(&head->lock);
1672  		if (!list_empty(&req->wait.entry))
1673  			return true;
1674  		spin_unlock(&head->lock);
1675  	}
1676  	rcu_read_unlock();
1677  	return false;
1678  }
1679  
1680  static void poll_iocb_unlock_wq(struct poll_iocb *req)
1681  {
1682  	spin_unlock(&req->head->lock);
1683  	rcu_read_unlock();
1684  }
1685  
1686  static void aio_poll_complete_work(struct work_struct *work)
1687  {
1688  	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1689  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1690  	struct poll_table_struct pt = { ._key = req->events };
1691  	struct kioctx *ctx = iocb->ki_ctx;
1692  	__poll_t mask = 0;
1693  
1694  	if (!READ_ONCE(req->cancelled))
1695  		mask = vfs_poll(req->file, &pt) & req->events;
1696  
1697  	/*
1698  	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1699  	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
1700  	 * synchronize with them.  In the cancellation case the list_del_init
1701  	 * itself is not actually needed, but harmless so we keep it in to
1702  	 * avoid further branches in the fast path.
1703  	 */
1704  	spin_lock_irq(&ctx->ctx_lock);
1705  	if (poll_iocb_lock_wq(req)) {
1706  		if (!mask && !READ_ONCE(req->cancelled)) {
1707  			/*
1708  			 * The request isn't actually ready to be completed yet.
1709  			 * Reschedule completion if another wakeup came in.
1710  			 */
1711  			if (req->work_need_resched) {
1712  				schedule_work(&req->work);
1713  				req->work_need_resched = false;
1714  			} else {
1715  				req->work_scheduled = false;
1716  			}
1717  			poll_iocb_unlock_wq(req);
1718  			spin_unlock_irq(&ctx->ctx_lock);
1719  			return;
1720  		}
1721  		list_del_init(&req->wait.entry);
1722  		poll_iocb_unlock_wq(req);
1723  	} /* else, POLLFREE has freed the waitqueue, so we must complete */
1724  	list_del_init(&iocb->ki_list);
1725  	iocb->ki_res.res = mangle_poll(mask);
1726  	spin_unlock_irq(&ctx->ctx_lock);
1727  
1728  	iocb_put(iocb);
1729  }
1730  
1731  /* assumes we are called with irqs disabled */
1732  static int aio_poll_cancel(struct kiocb *iocb)
1733  {
1734  	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1735  	struct poll_iocb *req = &aiocb->poll;
1736  
1737  	if (poll_iocb_lock_wq(req)) {
1738  		WRITE_ONCE(req->cancelled, true);
1739  		if (!req->work_scheduled) {
1740  			schedule_work(&aiocb->poll.work);
1741  			req->work_scheduled = true;
1742  		}
1743  		poll_iocb_unlock_wq(req);
1744  	} /* else, the request was force-cancelled by POLLFREE already */
1745  
1746  	return 0;
1747  }
1748  
1749  static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1750  		void *key)
1751  {
1752  	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1753  	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1754  	__poll_t mask = key_to_poll(key);
1755  	unsigned long flags;
1756  
1757  	/* for instances that support it, check for an event match first: */
1758  	if (mask && !(mask & req->events))
1759  		return 0;
1760  
1761  	/*
1762  	 * Complete the request inline if possible.  This requires that three
1763  	 * conditions be met:
1764  	 *   1. An event mask must have been passed.  If a plain wakeup was done
1765  	 *	instead, then mask == 0 and we have to call vfs_poll() to get
1766  	 *	the events, so inline completion isn't possible.
1767  	 *   2. The completion work must not have already been scheduled.
1768  	 *   3. ctx_lock must not be busy.  We have to use trylock because we
1769  	 *	already hold the waitqueue lock, so this inverts the normal
1770  	 *	locking order.  Use irqsave/irqrestore because not all
1771  	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
1772  	 *	yet IRQs have to be disabled before ctx_lock is obtained.
1773  	 */
1774  	if (mask && !req->work_scheduled &&
1775  	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1776  		struct kioctx *ctx = iocb->ki_ctx;
1777  
1778  		list_del_init(&req->wait.entry);
1779  		list_del(&iocb->ki_list);
1780  		iocb->ki_res.res = mangle_poll(mask);
1781  		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1782  			iocb = NULL;
1783  			INIT_WORK(&req->work, aio_poll_put_work);
1784  			schedule_work(&req->work);
1785  		}
1786  		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1787  		if (iocb)
1788  			iocb_put(iocb);
1789  	} else {
1790  		/*
1791  		 * Schedule the completion work if needed.  If it was already
1792  		 * scheduled, record that another wakeup came in.
1793  		 *
1794  		 * Don't remove the request from the waitqueue here, as it might
1795  		 * not actually be complete yet (we won't know until vfs_poll()
1796  		 * is called), and we must not miss any wakeups.  POLLFREE is an
1797  		 * exception to this; see below.
1798  		 */
1799  		if (req->work_scheduled) {
1800  			req->work_need_resched = true;
1801  		} else {
1802  			schedule_work(&req->work);
1803  			req->work_scheduled = true;
1804  		}
1805  
1806  		/*
1807  		 * If the waitqueue is being freed early but we can't complete
1808  		 * the request inline, we have to tear down the request as best
1809  		 * we can.  That means immediately removing the request from its
1810  		 * waitqueue and preventing all further accesses to the
1811  		 * waitqueue via the request.  We also need to schedule the
1812  		 * completion work (done above).  Also mark the request as
1813  		 * cancelled, to potentially skip an unneeded call to ->poll().
1814  		 */
1815  		if (mask & POLLFREE) {
1816  			WRITE_ONCE(req->cancelled, true);
1817  			list_del_init(&req->wait.entry);
1818  
1819  			/*
1820  			 * Careful: this *must* be the last step, since as soon
1821  			 * as req->head is NULL'ed out, the request can be
1822  			 * completed and freed, since aio_poll_complete_work()
1823  			 * will no longer need to take the waitqueue lock.
1824  			 */
1825  			smp_store_release(&req->head, NULL);
1826  		}
1827  	}
1828  	return 1;
1829  }
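
/*
 * Illustrative aside (not part of this file): a userspace analogue of the
 * trylock dance in aio_poll_wake() above.  When a thread already holds
 * lock B but the documented order is A-then-B, taking A unconditionally
 * can deadlock against the normal path, so the wakeup path may only
 * trylock A and must fall back to deferred work when that fails.  A
 * minimal pthreads sketch; all names here are ours, not kernel APIs.
 */
#if 0 /* userspace sketch: gcc -pthread trylock_sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER; /* plays ctx_lock */
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER; /* plays the wq lock */

/* Normal path: A then B, the documented lock order. */
static void normal_path(void)
{
	pthread_mutex_lock(&A);
	pthread_mutex_lock(&B);
	/* ... work under both locks ... */
	pthread_mutex_unlock(&B);
	pthread_mutex_unlock(&A);
}

/* Wakeup path: entered with B already held, so A may only be trylocked. */
static void wakeup_path(void)
{
	if (pthread_mutex_trylock(&A) == 0) {
		/* fast path: complete inline, like aio_poll_wake() */
		pthread_mutex_unlock(&A);
	} else {
		/* slow path: defer, like schedule_work(&req->work) */
		puts("contended: deferring to a worker");
	}
}

int main(void)
{
	normal_path();
	pthread_mutex_lock(&B);
	wakeup_path();
	pthread_mutex_unlock(&B);
	return 0;
}
#endif
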
1830  
1831  struct aio_poll_table {
1832  	struct poll_table_struct	pt;
1833  	struct aio_kiocb		*iocb;
1834  	bool				queued;
1835  	int				error;
1836  };
1837  
1838  static void
1839  aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1840  		struct poll_table_struct *p)
1841  {
1842  	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1843  
1844  	/* multiple wait queues per file are not supported */
1845  	if (unlikely(pt->queued)) {
1846  		pt->error = -EINVAL;
1847  		return;
1848  	}
1849  
1850  	pt->queued = true;
1851  	pt->error = 0;
1852  	pt->iocb->poll.head = head;
1853  	add_wait_queue(head, &pt->iocb->poll.wait);
1854  }
1855  
1856  static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1857  {
1858  	struct kioctx *ctx = aiocb->ki_ctx;
1859  	struct poll_iocb *req = &aiocb->poll;
1860  	struct aio_poll_table apt;
1861  	bool cancel = false;
1862  	__poll_t mask;
1863  
1864  	/* reject any unknown events outside the normal event mask. */
1865  	if ((u16)iocb->aio_buf != iocb->aio_buf)
1866  		return -EINVAL;
1867  	/* reject fields that are not defined for poll */
1868  	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1869  		return -EINVAL;
1870  
1871  	INIT_WORK(&req->work, aio_poll_complete_work);
1872  	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1873  
1874  	req->head = NULL;
1875  	req->cancelled = false;
1876  	req->work_scheduled = false;
1877  	req->work_need_resched = false;
1878  
1879  	apt.pt._qproc = aio_poll_queue_proc;
1880  	apt.pt._key = req->events;
1881  	apt.iocb = aiocb;
1882  	apt.queued = false;
1883  	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1884  
1885  	/* initialize the list so that we can do list_empty checks */
1886  	INIT_LIST_HEAD(&req->wait.entry);
1887  	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1888  
1889  	mask = vfs_poll(req->file, &apt.pt) & req->events;
1890  	spin_lock_irq(&ctx->ctx_lock);
1891  	if (likely(apt.queued)) {
1892  		bool on_queue = poll_iocb_lock_wq(req);
1893  
1894  		if (!on_queue || req->work_scheduled) {
1895  			/*
1896  			 * aio_poll_wake() already either scheduled the async
1897  			 * completion work, or completed the request inline.
1898  			 */
1899  			if (apt.error) /* unsupported case: multiple queues */
1900  				cancel = true;
1901  			apt.error = 0;
1902  			mask = 0;
1903  		}
1904  		if (mask || apt.error) {
1905  			/* Steal to complete synchronously. */
1906  			list_del_init(&req->wait.entry);
1907  		} else if (cancel) {
1908  			/* Cancel if possible (may be too late though). */
1909  			WRITE_ONCE(req->cancelled, true);
1910  		} else if (on_queue) {
1911  			/*
1912  			 * Actually waiting for an event, so add the request to
1913  			 * active_reqs so that it can be cancelled if needed.
1914  			 */
1915  			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1916  			aiocb->ki_cancel = aio_poll_cancel;
1917  		}
1918  		if (on_queue)
1919  			poll_iocb_unlock_wq(req);
1920  	}
1921  	if (mask) { /* no async, we'd stolen it */
1922  		aiocb->ki_res.res = mangle_poll(mask);
1923  		apt.error = 0;
1924  	}
1925  	spin_unlock_irq(&ctx->ctx_lock);
1926  	if (mask)
1927  		iocb_put(aiocb);
1928  	return apt.error;
1929  }
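
/*
 * Illustrative aside (not part of this file): submitting an IOCB_CMD_POLL
 * iocb from userspace against a pipe and reaping the completion.  The
 * requested events go in aio_buf, and the resulting (mangled) poll mask
 * comes back in the io_event's res field.  A minimal sketch using raw
 * syscalls; it assumes headers new enough to define IOCB_CMD_POLL, and
 * error handling is elided for brevity.
 */
#if 0 /* userspace sketch: gcc aio_poll_sketch.c */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <poll.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	int pfd[2];

	pipe(pfd);
	syscall(SYS_io_setup, 8, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_fildes = pfd[0];
	cb.aio_buf = POLLIN;		/* events of interest, as for poll(2) */

	syscall(SYS_io_submit, ctx, 1, cbs);
	write(pfd[1], "x", 1);		/* make the read end readable */

	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
	printf("revents = 0x%llx\n", (unsigned long long)ev.res);

	syscall(SYS_io_destroy, ctx);
	return 0;
}
#endif
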
1930  
1931  static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1932  			   struct iocb __user *user_iocb, struct aio_kiocb *req,
1933  			   bool compat)
1934  {
1935  	req->ki_filp = fget(iocb->aio_fildes);
1936  	if (unlikely(!req->ki_filp))
1937  		return -EBADF;
1938  
1939  	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1940  		struct eventfd_ctx *eventfd;
1941  		/*
1942  		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1943  		 * instance of the file* now. The file descriptor must be
1944  		 * an eventfd() fd, and will be signaled for each completed
1945  		 * event using the eventfd_signal() function.
		 * (A userspace example of this flag is sketched after this
		 * function.)
1946  		 */
1947  		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1948  		if (IS_ERR(eventfd))
1949  			return PTR_ERR(eventfd);
1950  
1951  		req->ki_eventfd = eventfd;
1952  	}
1953  
1954  	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1955  		pr_debug("EFAULT: aio_key\n");
1956  		return -EFAULT;
1957  	}
1958  
1959  	req->ki_res.obj = (u64)(unsigned long)user_iocb;
1960  	req->ki_res.data = iocb->aio_data;
1961  	req->ki_res.res = 0;
1962  	req->ki_res.res2 = 0;
1963  
1964  	switch (iocb->aio_lio_opcode) {
1965  	case IOCB_CMD_PREAD:
1966  		return aio_read(&req->rw, iocb, false, compat);
1967  	case IOCB_CMD_PWRITE:
1968  		return aio_write(&req->rw, iocb, false, compat);
1969  	case IOCB_CMD_PREADV:
1970  		return aio_read(&req->rw, iocb, true, compat);
1971  	case IOCB_CMD_PWRITEV:
1972  		return aio_write(&req->rw, iocb, true, compat);
1973  	case IOCB_CMD_FSYNC:
1974  		return aio_fsync(&req->fsync, iocb, false);
1975  	case IOCB_CMD_FDSYNC:
1976  		return aio_fsync(&req->fsync, iocb, true);
1977  	case IOCB_CMD_POLL:
1978  		return aio_poll(req, iocb);
1979  	default:
1980  		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1981  		return -EINVAL;
1982  	}
1983  }
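
/*
 * Illustrative aside (not part of this file): the IOCB_FLAG_RESFD path
 * handled above, seen from userspace.  Attaching an eventfd to an iocb
 * makes each completion add one to the eventfd counter, so AIO
 * completions can be multiplexed through a poll/epoll loop.  A minimal
 * sketch; the file path is only an example, any readable file works, and
 * error handling is elided.
 */
#if 0 /* userspace sketch: gcc aio_resfd_sketch.c */
#include <linux/aio_abi.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[64];
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int fd = open("/etc/hostname", O_RDONLY);	/* example file */

	syscall(SYS_io_setup, 8, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (uintptr_t)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_flags = IOCB_FLAG_RESFD;	/* route a completion tick ... */
	cb.aio_resfd = efd;		/* ... to this eventfd */

	syscall(SYS_io_submit, ctx, 1, cbs);

	read(efd, &ticks, sizeof(ticks));	/* wakes once the I/O completes */
	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
	printf("%llu completion(s), res = %lld\n",
	       (unsigned long long)ticks, (long long)ev.res);

	syscall(SYS_io_destroy, ctx);
	return 0;
}
#endif
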
1984  
1985  static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1986  			 bool compat)
1987  {
1988  	struct aio_kiocb *req;
1989  	struct iocb iocb;
1990  	int err;
1991  
1992  	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
1993  		return -EFAULT;
1994  
1995  	/* enforce forwards compatibility on users */
1996  	if (unlikely(iocb.aio_reserved2)) {
1997  		pr_debug("EINVAL: reserved field set\n");
1998  		return -EINVAL;
1999  	}
2000  
2001  	/* prevent overflows (see the sketch after this function) */
2002  	if (unlikely(
2003  	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2004  	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2005  	    ((ssize_t)iocb.aio_nbytes < 0)
2006  	   )) {
2007  		pr_debug("EINVAL: overflow check\n");
2008  		return -EINVAL;
2009  	}
2010  
2011  	req = aio_get_req(ctx);
2012  	if (unlikely(!req))
2013  		return -EAGAIN;
2014  
2015  	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2016  
2017  	/* Done with the synchronous reference */
2018  	iocb_put(req);
2019  
2020  	/*
2021  	 * If err is 0, we've either done aio_complete() ourselves or have
2022  	 * arranged for it to be done asynchronously.  Anything non-zero
2023  	 * means that we need to destroy req ourselves.
2024  	 */
2025  	if (unlikely(err)) {
2026  		iocb_destroy(req);
2027  		put_reqs_available(ctx, 1);
2028  	}
2029  	return err;
2030  }
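
/*
 * Illustrative aside (not part of this file): the overflow checks above
 * reject, with -EINVAL, any iocb whose aio_nbytes is negative when read
 * as ssize_t (or whose aio_buf does not fit in an unsigned long) before
 * any opcode-specific code runs.  A minimal sketch of tripping the check:
 */
#if 0 /* userspace sketch; ctx and includes as in the examples above */
struct iocb bad = {
	.aio_lio_opcode	= IOCB_CMD_PREAD,
	.aio_fildes	= 0,		/* stdin, just for illustration */
	.aio_nbytes	= ~0ULL,	/* (ssize_t)-1 < 0: rejected */
};
struct iocb *bad_list[1] = { &bad };

/* io_submit(ctx, 1, bad_list) returns -1 here with errno == EINVAL. */
#endif
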
2031  
2032  /* sys_io_submit:
2033   *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
2034   *	the number of iocbs queued.  May return -EINVAL if the aio_context
2035   *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
2036   *	*iocbpp[0] is not properly initialized, if the operation specified
2037   *	is invalid for the file descriptor in the iocb.  May fail with
2038   *	-EFAULT if any of the data structures point to invalid data.  May
2039   *	fail with -EBADF if the file descriptor specified in the first
2040   *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
2041   *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
2042   *	fail with -ENOSYS if not implemented.
2043   */
2044  SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2045  		struct iocb __user * __user *, iocbpp)
2046  {
2047  	struct kioctx *ctx;
2048  	long ret = 0;
2049  	int i = 0;
2050  	struct blk_plug plug;
2051  
2052  	if (unlikely(nr < 0))
2053  		return -EINVAL;
2054  
2055  	ctx = lookup_ioctx(ctx_id);
2056  	if (unlikely(!ctx)) {
2057  		pr_debug("EINVAL: invalid context id\n");
2058  		return -EINVAL;
2059  	}
2060  
2061  	if (nr > ctx->nr_events)
2062  		nr = ctx->nr_events;
2063  
2064  	if (nr > AIO_PLUG_THRESHOLD)
2065  		blk_start_plug(&plug);
2066  	for (i = 0; i < nr; i++) {
2067  		struct iocb __user *user_iocb;
2068  
2069  		if (unlikely(get_user(user_iocb, iocbpp + i))) {
2070  			ret = -EFAULT;
2071  			break;
2072  		}
2073  
2074  		ret = io_submit_one(ctx, user_iocb, false);
2075  		if (ret)
2076  			break;
2077  	}
2078  	if (nr > AIO_PLUG_THRESHOLD)
2079  		blk_finish_plug(&plug);
2080  
2081  	percpu_ref_put(&ctx->users);
2082  	return i ? i : ret;
2083  }
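
/*
 * Illustrative aside (not part of this file): because the loop above
 * stops at the first failing iocb, io_submit() can return fewer than nr
 * without reporting an error, so batching callers must advance and
 * retry.  A minimal sketch using the raw syscall; the helper name is
 * ours.
 */
#if 0 /* userspace sketch */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

static long submit_all(aio_context_t ctx, long nr, struct iocb **cbs)
{
	long done = 0;

	while (done < nr) {
		long ret = syscall(SYS_io_submit, ctx, nr - done, cbs + done);

		if (ret < 0)
			return done ? done : ret; /* nothing queued: -1, errno set */
		done += ret;
	}
	return done;
}
#endif
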
2084  
2085  #ifdef CONFIG_COMPAT
2086  COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2087  		       int, nr, compat_uptr_t __user *, iocbpp)
2088  {
2089  	struct kioctx *ctx;
2090  	long ret = 0;
2091  	int i = 0;
2092  	struct blk_plug plug;
2093  
2094  	if (unlikely(nr < 0))
2095  		return -EINVAL;
2096  
2097  	ctx = lookup_ioctx(ctx_id);
2098  	if (unlikely(!ctx)) {
2099  		pr_debug("EINVAL: invalid context id\n");
2100  		return -EINVAL;
2101  	}
2102  
2103  	if (nr > ctx->nr_events)
2104  		nr = ctx->nr_events;
2105  
2106  	if (nr > AIO_PLUG_THRESHOLD)
2107  		blk_start_plug(&plug);
2108  	for (i = 0; i < nr; i++) {
2109  		compat_uptr_t user_iocb;
2110  
2111  		if (unlikely(get_user(user_iocb, iocbpp + i))) {
2112  			ret = -EFAULT;
2113  			break;
2114  		}
2115  
2116  		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2117  		if (ret)
2118  			break;
2119  	}
2120  	if (nr > AIO_PLUG_THRESHOLD)
2121  		blk_finish_plug(&plug);
2122  
2123  	percpu_ref_put(&ctx->users);
2124  	return i ? i : ret;
2125  }
2126  #endif
2127  
2128  /* sys_io_cancel:
2129   *	Attempts to cancel an iocb previously passed to io_submit().  If
2130   *	cancellation is successfully started, -EINPROGRESS is returned:
2131   *	the result argument is ignored and the completion event is
2132   *	delivered through the ring buffer instead.  May fail with -EFAULT
2133   *	if any of the data structures pointed to are invalid.  May fail
2134   *	with -EINVAL if the aio_context specified by ctx_id is invalid, or
2135   *	if the iocb specified is not currently cancellable.  Will fail
2136   *	with -ENOSYS if not implemented.
2137   */
2138  SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2139  		struct io_event __user *, result)
2140  {
2141  	struct kioctx *ctx;
2142  	struct aio_kiocb *kiocb;
2143  	int ret = -EINVAL;
2144  	u32 key;
2145  	u64 obj = (u64)(unsigned long)iocb;
2146  
2147  	if (unlikely(get_user(key, &iocb->aio_key)))
2148  		return -EFAULT;
2149  	if (unlikely(key != KIOCB_KEY))
2150  		return -EINVAL;
2151  
2152  	ctx = lookup_ioctx(ctx_id);
2153  	if (unlikely(!ctx))
2154  		return -EINVAL;
2155  
2156  	spin_lock_irq(&ctx->ctx_lock);
2157  	/* TODO: use a hash or array, this sucks. */
2158  	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2159  		if (kiocb->ki_res.obj == obj) {
2160  			ret = kiocb->ki_cancel(&kiocb->rw);
2161  			list_del_init(&kiocb->ki_list);
2162  			break;
2163  		}
2164  	}
2165  	spin_unlock_irq(&ctx->ctx_lock);
2166  
2167  	if (!ret) {
2168  		/*
2169  		 * The result argument is no longer used - the io_event is
2170  		 * always delivered via the ring buffer. -EINPROGRESS indicates
2171  		 * cancellation is in progress:
2172  		 */
2173  		ret = -EINPROGRESS;
2174  	}
2175  
2176  	percpu_ref_put(&ctx->users);
2177  
2178  	return ret;
2179  }
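
/*
 * Illustrative aside (not part of this file): cancelling a pending
 * IOCB_CMD_POLL from userspace.  As documented above, success is
 * reported as -EINPROGRESS (errno EINPROGRESS through the syscall
 * wrapper), the result argument is ignored, and the final event still
 * arrives through the ring, so it must be reaped with io_getevents().
 * A minimal sketch with error handling elided.
 */
#if 0 /* userspace sketch: gcc aio_cancel_sketch.c */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <poll.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	int pfd[2];
	long ret;

	pipe(pfd);
	syscall(SYS_io_setup, 8, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_fildes = pfd[0];
	cb.aio_buf = POLLIN;		/* never fires: nothing is written */
	syscall(SYS_io_submit, ctx, 1, cbs);

	ret = syscall(SYS_io_cancel, ctx, &cb, &ev);
	if (ret < 0 && errno == EINPROGRESS)
		puts("cancellation under way, reaping via the ring");

	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
	syscall(SYS_io_destroy, ctx);
	return 0;
}
#endif
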
2180  
2181  static long do_io_getevents(aio_context_t ctx_id,
2182  		long min_nr,
2183  		long nr,
2184  		struct io_event __user *events,
2185  		struct timespec64 *ts)
2186  {
2187  	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2188  	struct kioctx *ioctx = lookup_ioctx(ctx_id);
2189  	long ret = -EINVAL;
2190  
2191  	if (likely(ioctx)) {
2192  		if (likely(min_nr <= nr && min_nr >= 0))
2193  			ret = read_events(ioctx, min_nr, nr, events, until);
2194  		percpu_ref_put(&ioctx->users);
2195  	}
2196  
2197  	return ret;
2198  }
2199  
2200  /* io_getevents:
2201   *	Attempts to read at least min_nr events and up to nr events from
2202   *	the completion queue for the aio_context specified by ctx_id. If
2203   *	it succeeds, the number of read events is returned. May fail with
2204   *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2205   *	out of range, or if timeout is out of range.  May fail with -EFAULT
2206   *	if any of the memory specified is invalid.  May return 0 or
2207   *	fewer than min_nr events if the timeout has elapsed before
2208   *	sufficient events are available; timeout == NULL specifies an
2209   *	infinite timeout.  Note that the timeout is relative.  Will fail
2210   *	with -ENOSYS if not implemented.
2211   */
2212  #ifdef CONFIG_64BIT
2213  
2214  SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2215  		long, min_nr,
2216  		long, nr,
2217  		struct io_event __user *, events,
2218  		struct __kernel_timespec __user *, timeout)
2219  {
2220  	struct timespec64	ts;
2221  	int			ret;
2222  
2223  	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2224  		return -EFAULT;
2225  
2226  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2227  	if (!ret && signal_pending(current))
2228  		ret = -EINTR;
2229  	return ret;
2230  }
2231  
2232  #endif
2233  
2234  struct __aio_sigset {
2235  	const sigset_t __user	*sigmask;
2236  	size_t		sigsetsize;
2237  };
2238  
2239  SYSCALL_DEFINE6(io_pgetevents,
2240  		aio_context_t, ctx_id,
2241  		long, min_nr,
2242  		long, nr,
2243  		struct io_event __user *, events,
2244  		struct __kernel_timespec __user *, timeout,
2245  		const struct __aio_sigset __user *, usig)
2246  {
2247  	struct __aio_sigset	ksig = { NULL, };
2248  	struct timespec64	ts;
2249  	bool interrupted;
2250  	int ret;
2251  
2252  	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2253  		return -EFAULT;
2254  
2255  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2256  		return -EFAULT;
2257  
2258  	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2259  	if (ret)
2260  		return ret;
2261  
2262  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2263  
2264  	interrupted = signal_pending(current);
2265  	restore_saved_sigmask_unless(interrupted);
2266  	if (interrupted && !ret)
2267  		ret = -ERESTARTNOHAND;
2268  
2269  	return ret;
2270  }
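
/*
 * Illustrative aside (not part of this file): driving io_pgetevents()
 * from userspace in the same spirit as ppoll()/pselect(), atomically
 * swapping in a signal mask for the duration of the wait.  The struct
 * below must mirror the kernel's struct __aio_sigset, and the sigsetsize
 * must be the kernel's sigset size (8 bytes on Linux), not glibc's.  A
 * minimal sketch for a 64-bit build; SYS_io_pgetevents is assumed to be
 * provided by the installed headers, and the wrapper name is ours.
 */
#if 0 /* userspace sketch */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>
#include <stddef.h>
#include <time.h>

struct aio_sigset {			/* userspace mirror of __aio_sigset */
	const sigset_t *sigmask;
	size_t sigsetsize;
};

static long my_io_pgetevents(aio_context_t ctx, long min_nr, long nr,
			     struct io_event *events,
			     struct timespec *timeout, const sigset_t *mask)
{
	struct aio_sigset usig = { mask, 8 /* kernel sigset_t size */ };

	return syscall(SYS_io_pgetevents, ctx, min_nr, nr, events,
		       timeout, mask ? &usig : NULL);
}
#endif
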
2271  
2272  #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2273  
2274  SYSCALL_DEFINE6(io_pgetevents_time32,
2275  		aio_context_t, ctx_id,
2276  		long, min_nr,
2277  		long, nr,
2278  		struct io_event __user *, events,
2279  		struct old_timespec32 __user *, timeout,
2280  		const struct __aio_sigset __user *, usig)
2281  {
2282  	struct __aio_sigset	ksig = { NULL, };
2283  	struct timespec64	ts;
2284  	bool interrupted;
2285  	int ret;
2286  
2287  	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2288  		return -EFAULT;
2289  
2290  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2291  		return -EFAULT;
2292
2294  	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2295  	if (ret)
2296  		return ret;
2297  
2298  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2299  
2300  	interrupted = signal_pending(current);
2301  	restore_saved_sigmask_unless(interrupted);
2302  	if (interrupted && !ret)
2303  		ret = -ERESTARTNOHAND;
2304  
2305  	return ret;
2306  }
2307  
2308  #endif
2309  
2310  #if defined(CONFIG_COMPAT_32BIT_TIME)
2311  
2312  SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2313  		__s32, min_nr,
2314  		__s32, nr,
2315  		struct io_event __user *, events,
2316  		struct old_timespec32 __user *, timeout)
2317  {
2318  	struct timespec64 t;
2319  	int ret;
2320  
2321  	if (timeout && get_old_timespec32(&t, timeout))
2322  		return -EFAULT;
2323  
2324  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2325  	if (!ret && signal_pending(current))
2326  		ret = -EINTR;
2327  	return ret;
2328  }
2329  
2330  #endif
2331  
2332  #ifdef CONFIG_COMPAT
2333  
2334  struct __compat_aio_sigset {
2335  	compat_uptr_t		sigmask;
2336  	compat_size_t		sigsetsize;
2337  };
2338  
2339  #if defined(CONFIG_COMPAT_32BIT_TIME)
2340  
2341  COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2342  		compat_aio_context_t, ctx_id,
2343  		compat_long_t, min_nr,
2344  		compat_long_t, nr,
2345  		struct io_event __user *, events,
2346  		struct old_timespec32 __user *, timeout,
2347  		const struct __compat_aio_sigset __user *, usig)
2348  {
2349  	struct __compat_aio_sigset ksig = { 0, };
2350  	struct timespec64 t;
2351  	bool interrupted;
2352  	int ret;
2353  
2354  	if (timeout && get_old_timespec32(&t, timeout))
2355  		return -EFAULT;
2356  
2357  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2358  		return -EFAULT;
2359  
2360  	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2361  	if (ret)
2362  		return ret;
2363  
2364  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2365  
2366  	interrupted = signal_pending(current);
2367  	restore_saved_sigmask_unless(interrupted);
2368  	if (interrupted && !ret)
2369  		ret = -ERESTARTNOHAND;
2370  
2371  	return ret;
2372  }
2373  
2374  #endif
2375  
2376  COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2377  		compat_aio_context_t, ctx_id,
2378  		compat_long_t, min_nr,
2379  		compat_long_t, nr,
2380  		struct io_event __user *, events,
2381  		struct __kernel_timespec __user *, timeout,
2382  		const struct __compat_aio_sigset __user *, usig)
2383  {
2384  	struct __compat_aio_sigset ksig = { 0, };
2385  	struct timespec64 t;
2386  	bool interrupted;
2387  	int ret;
2388  
2389  	if (timeout && get_timespec64(&t, timeout))
2390  		return -EFAULT;
2391  
2392  	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2393  		return -EFAULT;
2394  
2395  	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2396  	if (ret)
2397  		return ret;
2398  
2399  	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2400  
2401  	interrupted = signal_pending(current);
2402  	restore_saved_sigmask_unless(interrupted);
2403  	if (interrupted && !ret)
2404  		ret = -ERESTARTNOHAND;
2405  
2406  	return ret;
2407  }
2408  #endif
2409