xref: /openbmc/linux/fs/userfaultfd.c (revision 72981e0e7b609c741d7764cc920c8fec00920bd5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

int sysctl_unprivileged_userfaultfd __read_mostly = 1;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
	UFFD_STATE_WAIT_API,
	UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	struct seqcount refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* state machine */
	enum userfaultfd_state state;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	bool mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};
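
/*
 * Example: a minimal userspace sketch of how a context like the above
 * is created and moved from UFFD_STATE_WAIT_API to UFFD_STATE_RUNNING.
 * The monitor opens the pseudo fd, negotiates the API (which fills in
 * ctx->features), then arms a range; "area" and "len" are placeholder
 * variables and error handling is elided:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *	ioctl(uffd, UFFDIO_API, &api);		// WAIT_API -> RUNNING
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)area, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);	// arm the vmas
 */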

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

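/*
 * Example: the "key" passed to userfaultfd_wake_function() is a
 * userfaultfd_wake_range, typically built from a UFFDIO_WAKE ioctl. A
 * monitor that resolved a fault out of band would wake the blocked
 * faulter roughly like this sketch ("uffd", "addr" and "page_size"
 * are placeholder variables); note the kernel-internal len == 0
 * "wake all" convention above is not part of the ioctl ABI:
 *
 *	struct uffdio_range range = {
 *		.start = addr,		// page aligned start
 *		.len   = page_size,	// bytes to wake
 *	};
 *	ioctl(uffd, UFFDIO_WAKE, &range);
 */
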
/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;
	msg.arg.pagefault.address = address;
	if (flags & FAULT_FLAG_WRITE)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
		 * was a read fault; otherwise, if set, it means it's
		 * a write fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
		 * a missing fault; otherwise, if set, it means it's a
		 * write protect fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}

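/*
 * Example: the receiving side of userfault_msg(). A userspace monitor
 * reads one struct uffd_msg at a time and decodes the pagefault flags
 * exactly as the comments above describe; "uffd" is a placeholder
 * descriptor and handle_wp_fault()/handle_missing() are hypothetical
 * helpers in this sketch:
 *
 *	struct uffd_msg msg;
 *	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *		return;		// nothing pending (EAGAIN)
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		__u64 addr = msg.arg.pagefault.address;
 *		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)
 *			handle_wp_fault(addr);		// write-protect fault
 *		else if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
 *			handle_missing(addr, true);	// write to missing page
 *		else
 *			handle_missing(addr, false);	// read from missing page
 *	}
 */
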
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_area_struct *vma,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_area_struct *vma,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd))
		goto out;

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (pte_none(*pte))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

/* Should pair with userfaultfd_signal_pending() */
static inline long userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/* Should pair with userfaultfd_get_blocking_state() */
static inline bool userfaultfd_signal_pending(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return signal_pending(current);

	if (flags & FAULT_FLAG_KILLABLE)
		return fatal_signal_pending(current);

	return false;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_sem must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	long blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_sem and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_sem so we can only check that
	 * the mmap_sem is held, if PF_DUMPCORE was not set.
	 */
	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_sem.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking triggering an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_sem */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
			ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock from happening before the list_add
	 * in __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	up_read(&mm->mmap_sem);

	if (likely(must_wait && !READ_ONCE(ctx->released) &&
		   !userfaultfd_signal_pending(vmf->flags))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
		ret |= VM_FAULT_MAJOR;

		/*
		 * False wakeups can originate even from rwsem before
		 * up_read() however userfaults will wait either for a
		 * targeted wakeup on the specific uwq waitqueue from
		 * wake_userfault() or for signals or for uffd
		 * release.
		 */
		while (!READ_ONCE(uwq.waken)) {
			/*
			 * This needs the full smp_store_mb()
			 * guarantee as the state write must be
			 * visible to other CPUs before reading
			 * uwq.waken from other CPUs.
			 */
			set_current_state(blocking_state);
			if (READ_ONCE(uwq.waken) ||
			    READ_ONCE(ctx->released) ||
			    userfaultfd_signal_pending(vmf->flags))
				break;
			schedule();
		}
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let either of the two pointers point to
	 * self. So list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}

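/*
 * Example: while handle_userfault() sleeps in schedule() above, the
 * monitor typically resolves a missing-page fault and wakes the
 * faulter in one step with UFFDIO_COPY, which feeds the wakeup that
 * the uwq is waiting for. "uffd", "src_page", "fault_addr" and
 * "page_size" are placeholder variables in this sketch:
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(page_size - 1),
 *		.src  = (unsigned long)src_page,
 *		.len  = page_size,
 *		.mode = 0,		// zero mode also wakes the faulter
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */
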
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still points to it */
		down_write(&mm->mmap_sem);
		/* no task can run (and in turn coredump) yet */
		VM_WARN_ON(!mmget_still_valid(mm));
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
			}
		up_write(&mm->mmap_sem);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	WRITE_ONCE(ctx->mmap_changing, false);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->state = UFFD_STATE_RUNNING;
		ctx->features = octx->features;
		ctx->released = false;
		ctx->mmap_changing = false;
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		WRITE_ONCE(octx->mmap_changing, true);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

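/*
 * Example: the userspace counterpart of the UFFD_EVENT_FORK message
 * produced by dup_fctx(). By the time the monitor reads the event,
 * resolve_userfault_fork() has turned fctx->new into a fresh file
 * descriptor delivered in msg.arg.fork.ufd; "uffd" is a placeholder
 * monitor descriptor in this sketch:
 *
 *	struct uffd_msg msg;
 *	if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
 *	    msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = msg.arg.fork.ufd;
 *		// the child's registered ranges are now served via
 *		// child_uffd; poll/read it like the parent's uffd
 *	}
 */
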
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	WRITE_ONCE(ctx->mmap_changing, true);
	up_read(&mm->mmap_sem);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;
	bool still_valid;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_sem. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_sem for writing.
	 */
	down_write(&mm->mmap_sem);
	still_valid = mmget_still_valid(mm);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		if (still_valid) {
			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
					 new_flags, vma->anon_vma,
					 vma->vm_file, vma->vm_pgoff,
					 vma_policy(vma),
					 NULL_VM_UFFD_CTX);
			if (prev)
				vma = prev;
			else
				prev = vma;
		}
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	switch (ctx->state) {
	case UFFD_STATE_WAIT_API:
		return EPOLLERR;
	case UFFD_STATE_RUNNING:
		/*
		 * poll() never guarantees that read won't block.
		 * userfaults can be woken before they're read().
		 */
		if (unlikely(!(file->f_flags & O_NONBLOCK)))
			return EPOLLERR;
		/*
		 * Lockless access to see if there are pending faults.
		 * __pollwait's last action is the add_wait_queue, but
		 * the spin_unlock would allow the waitqueue_active
		 * read to pass above the actual list_add inside the
		 * add_wait_queue critical section. So use a full
		 * memory barrier to serialize the list_add write of
		 * add_wait_queue() with the waitqueue_active read
		 * below.
		 */
		ret = 0;
		smp_mb();
		if (waitqueue_active(&ctx->fault_pending_wqh))
			ret = EPOLLIN;
		else if (waitqueue_active(&ctx->event_wqh))
			ret = EPOLLIN;

		return ret;
	default:
		WARN_ON_ONCE(1);
		return EPOLLERR;
	}
}

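/*
 * Example: userfaultfd_poll() above insists on O_NONBLOCK and reports
 * EPOLLIN when either faults or events are queued, so the canonical
 * monitor loop multiplexes the descriptor before reading. "uffd" is a
 * placeholder and dispatch_event() a hypothetical helper in this
 * sketch:
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		struct uffd_msg msg;
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *			continue;	// raced with another reader
 *		dispatch_event(&msg);
 *	}
 */
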
static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
				  struct userfaultfd_ctx *new,
				  struct uffd_msg *msg)
{
	int fd;

	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
			      O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		return fd;

	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;
	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling fork event requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * carefully.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock_irq(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
105386039bd3SAndrea Arcangeli 			/*
10542c5b7e1bSAndrea Arcangeli 			 * Use a seqcount to repeat the lockless check
10552c5b7e1bSAndrea Arcangeli 			 * in wake_userfault() to avoid missing
10562c5b7e1bSAndrea Arcangeli 			 * wakeups, because during the refile both
10572c5b7e1bSAndrea Arcangeli 			 * waitqueues could become empty if this is the
10582c5b7e1bSAndrea Arcangeli 			 * only userfault.
10592c5b7e1bSAndrea Arcangeli 			 */
10602c5b7e1bSAndrea Arcangeli 			write_seqcount_begin(&ctx->refile_seq);
10612c5b7e1bSAndrea Arcangeli 
10622c5b7e1bSAndrea Arcangeli 			/*
106315b726efSAndrea Arcangeli 			 * The fault_pending_wqh.lock prevents the uwq
106415b726efSAndrea Arcangeli 			 * from disappearing from under us.
106515b726efSAndrea Arcangeli 			 *
106615b726efSAndrea Arcangeli 			 * Refile this userfault from
106715b726efSAndrea Arcangeli 			 * fault_pending_wqh to fault_wqh, it's not
106815b726efSAndrea Arcangeli 			 * pending anymore after we read it.
106915b726efSAndrea Arcangeli 			 *
107015b726efSAndrea Arcangeli 			 * Use list_del() by hand (as
107115b726efSAndrea Arcangeli 			 * userfaultfd_wake_function also uses
107215b726efSAndrea Arcangeli 			 * list_del_init() by hand) to be sure nobody
107315b726efSAndrea Arcangeli 			 * changes __remove_wait_queue() to use
107415b726efSAndrea Arcangeli 			 * list_del_init() in turn breaking the
107515b726efSAndrea Arcangeli 			 * !list_empty_careful() check in
10762055da97SIngo Molnar 			 * handle_userfault(). The uwq->wq.head list
107715b726efSAndrea Arcangeli 			 * must never be empty at any time during the
107815b726efSAndrea Arcangeli 			 * refile, or the waitqueue could disappear
107915b726efSAndrea Arcangeli 			 * from under us. The "wait_queue_head_t"
108015b726efSAndrea Arcangeli 			 * parameter of __remove_wait_queue() is unused
108115b726efSAndrea Arcangeli 			 * anyway.
108286039bd3SAndrea Arcangeli 			 */
10832055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1084c430d1e8SMatthew Wilcox 			add_wait_queue(&ctx->fault_wqh, &uwq->wq);
108515b726efSAndrea Arcangeli 
10862c5b7e1bSAndrea Arcangeli 			write_seqcount_end(&ctx->refile_seq);
10872c5b7e1bSAndrea Arcangeli 
1088a9b85f94SAndrea Arcangeli 			/* careful to always initialize msg if ret == 0 */
1089a9b85f94SAndrea Arcangeli 			*msg = uwq->msg;
109015b726efSAndrea Arcangeli 			spin_unlock(&ctx->fault_pending_wqh.lock);
109186039bd3SAndrea Arcangeli 			ret = 0;
109286039bd3SAndrea Arcangeli 			break;
109386039bd3SAndrea Arcangeli 		}
109415b726efSAndrea Arcangeli 		spin_unlock(&ctx->fault_pending_wqh.lock);
10959cd75c3cSPavel Emelyanov 
10969cd75c3cSPavel Emelyanov 		spin_lock(&ctx->event_wqh.lock);
10979cd75c3cSPavel Emelyanov 		uwq = find_userfault_evt(ctx);
10989cd75c3cSPavel Emelyanov 		if (uwq) {
10999cd75c3cSPavel Emelyanov 			*msg = uwq->msg;
11009cd75c3cSPavel Emelyanov 
1101893e26e6SPavel Emelyanov 			if (uwq->msg.event == UFFD_EVENT_FORK) {
1102893e26e6SPavel Emelyanov 				fork_nctx = (struct userfaultfd_ctx *)
1103893e26e6SPavel Emelyanov 					(unsigned long)
1104893e26e6SPavel Emelyanov 					uwq->msg.arg.reserved.reserved1;
11052055da97SIngo Molnar 				list_move(&uwq->wq.entry, &fork_event);
1106384632e6SAndrea Arcangeli 				/*
1107384632e6SAndrea Arcangeli 				 * fork_nctx can be freed as soon as
1108384632e6SAndrea Arcangeli 				 * we drop the lock, unless we take a
1109384632e6SAndrea Arcangeli 				 * reference on it.
1110384632e6SAndrea Arcangeli 				 */
1111384632e6SAndrea Arcangeli 				userfaultfd_ctx_get(fork_nctx);
1112893e26e6SPavel Emelyanov 				spin_unlock(&ctx->event_wqh.lock);
1113893e26e6SPavel Emelyanov 				ret = 0;
1114893e26e6SPavel Emelyanov 				break;
1115893e26e6SPavel Emelyanov 			}
1116893e26e6SPavel Emelyanov 
11179cd75c3cSPavel Emelyanov 			userfaultfd_event_complete(ctx, uwq);
11189cd75c3cSPavel Emelyanov 			spin_unlock(&ctx->event_wqh.lock);
11199cd75c3cSPavel Emelyanov 			ret = 0;
11209cd75c3cSPavel Emelyanov 			break;
11219cd75c3cSPavel Emelyanov 		}
11229cd75c3cSPavel Emelyanov 		spin_unlock(&ctx->event_wqh.lock);
11239cd75c3cSPavel Emelyanov 
112486039bd3SAndrea Arcangeli 		if (signal_pending(current)) {
112586039bd3SAndrea Arcangeli 			ret = -ERESTARTSYS;
112686039bd3SAndrea Arcangeli 			break;
112786039bd3SAndrea Arcangeli 		}
112886039bd3SAndrea Arcangeli 		if (no_wait) {
112986039bd3SAndrea Arcangeli 			ret = -EAGAIN;
113086039bd3SAndrea Arcangeli 			break;
113186039bd3SAndrea Arcangeli 		}
1132ae62c16eSChristoph Hellwig 		spin_unlock_irq(&ctx->fd_wqh.lock);
113386039bd3SAndrea Arcangeli 		schedule();
1134ae62c16eSChristoph Hellwig 		spin_lock_irq(&ctx->fd_wqh.lock);
113586039bd3SAndrea Arcangeli 	}
113686039bd3SAndrea Arcangeli 	__remove_wait_queue(&ctx->fd_wqh, &wait);
113786039bd3SAndrea Arcangeli 	__set_current_state(TASK_RUNNING);
1138ae62c16eSChristoph Hellwig 	spin_unlock_irq(&ctx->fd_wqh.lock);
113986039bd3SAndrea Arcangeli 
1140893e26e6SPavel Emelyanov 	if (!ret && msg->event == UFFD_EVENT_FORK) {
1141893e26e6SPavel Emelyanov 		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
1142cbcfa130SEric Biggers 		spin_lock_irq(&ctx->event_wqh.lock);
1143893e26e6SPavel Emelyanov 		if (!list_empty(&fork_event)) {
1144384632e6SAndrea Arcangeli 			/*
1145384632e6SAndrea Arcangeli 			 * The fork thread didn't abort, so we can
1146384632e6SAndrea Arcangeli 			 * drop the temporary refcount.
1147384632e6SAndrea Arcangeli 			 */
1148384632e6SAndrea Arcangeli 			userfaultfd_ctx_put(fork_nctx);
1149384632e6SAndrea Arcangeli 
1150893e26e6SPavel Emelyanov 			uwq = list_first_entry(&fork_event,
1151893e26e6SPavel Emelyanov 					       typeof(*uwq),
11522055da97SIngo Molnar 					       wq.entry);
1153384632e6SAndrea Arcangeli 			/*
1154384632e6SAndrea Arcangeli 			 * If the fork_event list wasn't empty, and in
1155384632e6SAndrea Arcangeli 			 * turn the event wasn't already released by
1156384632e6SAndrea Arcangeli 			 * fork (the event is allocated on the fork
1157384632e6SAndrea Arcangeli 			 * thread's kernel stack), put the event back
1158384632e6SAndrea Arcangeli 			 * in its place in the event_wqh. The fork_event
1159384632e6SAndrea Arcangeli 			 * head will be freed as soon as we return, so
1160384632e6SAndrea Arcangeli 			 * the event cannot stay queued there no matter
1161384632e6SAndrea Arcangeli 			 * the current "ret" value.
1162384632e6SAndrea Arcangeli 			 */
11632055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1164893e26e6SPavel Emelyanov 			__add_wait_queue(&ctx->event_wqh, &uwq->wq);
1165384632e6SAndrea Arcangeli 
1166384632e6SAndrea Arcangeli 			/*
1167384632e6SAndrea Arcangeli 			 * Leave the event in the waitqueue and report
1168384632e6SAndrea Arcangeli 			 * error to userland if we failed to resolve
1169384632e6SAndrea Arcangeli 			 * the userfault fork.
1170384632e6SAndrea Arcangeli 			 */
1171384632e6SAndrea Arcangeli 			if (likely(!ret))
1172893e26e6SPavel Emelyanov 				userfaultfd_event_complete(ctx, uwq);
1173384632e6SAndrea Arcangeli 		} else {
1174384632e6SAndrea Arcangeli 			/*
1175384632e6SAndrea Arcangeli 			 * Here the fork thread aborted and the
1176384632e6SAndrea Arcangeli 			 * refcount from the fork thread on fork_nctx
1177384632e6SAndrea Arcangeli 			 * has already been released. We still hold
1178384632e6SAndrea Arcangeli 			 * the reference we took before releasing the
1179384632e6SAndrea Arcangeli 			 * lock above. If resolve_userfault_fork
1180384632e6SAndrea Arcangeli 			 * failed we have to drop it, because the
1181384632e6SAndrea Arcangeli 			 * fork_nctx has to be freed in that case. If
1182384632e6SAndrea Arcangeli 			 * it succeeded we'll hold it because the new
1183384632e6SAndrea Arcangeli 			 * uffd references it.
1184384632e6SAndrea Arcangeli 			 */
1185384632e6SAndrea Arcangeli 			if (ret)
1186384632e6SAndrea Arcangeli 				userfaultfd_ctx_put(fork_nctx);
1187893e26e6SPavel Emelyanov 		}
1188cbcfa130SEric Biggers 		spin_unlock_irq(&ctx->event_wqh.lock);
1189893e26e6SPavel Emelyanov 	}
1190893e26e6SPavel Emelyanov 
119186039bd3SAndrea Arcangeli 	return ret;
119286039bd3SAndrea Arcangeli }
119386039bd3SAndrea Arcangeli 
119486039bd3SAndrea Arcangeli static ssize_t userfaultfd_read(struct file *file, char __user *buf,
119586039bd3SAndrea Arcangeli 				size_t count, loff_t *ppos)
119686039bd3SAndrea Arcangeli {
119786039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
119886039bd3SAndrea Arcangeli 	ssize_t _ret, ret = 0;
1199a9b85f94SAndrea Arcangeli 	struct uffd_msg msg;
120086039bd3SAndrea Arcangeli 	int no_wait = file->f_flags & O_NONBLOCK;
120186039bd3SAndrea Arcangeli 
120286039bd3SAndrea Arcangeli 	if (ctx->state == UFFD_STATE_WAIT_API)
120386039bd3SAndrea Arcangeli 		return -EINVAL;
120486039bd3SAndrea Arcangeli 
120586039bd3SAndrea Arcangeli 	for (;;) {
1206a9b85f94SAndrea Arcangeli 		if (count < sizeof(msg))
120786039bd3SAndrea Arcangeli 			return ret ? ret : -EINVAL;
1208a9b85f94SAndrea Arcangeli 		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
120986039bd3SAndrea Arcangeli 		if (_ret < 0)
121086039bd3SAndrea Arcangeli 			return ret ? ret : _ret;
1211a9b85f94SAndrea Arcangeli 		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
121286039bd3SAndrea Arcangeli 			return ret ? ret : -EFAULT;
1213a9b85f94SAndrea Arcangeli 		ret += sizeof(msg);
1214a9b85f94SAndrea Arcangeli 		buf += sizeof(msg);
1215a9b85f94SAndrea Arcangeli 		count -= sizeof(msg);
121686039bd3SAndrea Arcangeli 		/*
121786039bd3SAndrea Arcangeli 		 * Allow reading more than one fault at a time, but only
121886039bd3SAndrea Arcangeli 		 * block when waiting for the very first one.
121986039bd3SAndrea Arcangeli 		 */
122086039bd3SAndrea Arcangeli 		no_wait = O_NONBLOCK;
122186039bd3SAndrea Arcangeli 	}
122286039bd3SAndrea Arcangeli }
122386039bd3SAndrea Arcangeli 
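/*
 * Minimal userland sketch of the contract implemented above (a hedged
 * illustration only): each read() must supply at least
 * sizeof(struct uffd_msg), a larger buffer may return several
 * messages at once, and only the very first message may block.
 * handle_fault() is a hypothetical resolver (e.g. one issuing
 * UFFDIO_COPY).
 *
 *	#include <linux/userfaultfd.h>
 *	#include <unistd.h>
 *
 *	struct uffd_msg msgs[16];
 *	ssize_t i, n = read(uffd, msgs, sizeof(msgs));
 *
 *	for (i = 0; n > 0 && i < n / (ssize_t)sizeof(msgs[0]); i++)
 *		if (msgs[i].event == UFFD_EVENT_PAGEFAULT)
 *			handle_fault(msgs[i].arg.pagefault.address);
 */
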
122486039bd3SAndrea Arcangeli static void __wake_userfault(struct userfaultfd_ctx *ctx,
122586039bd3SAndrea Arcangeli 			     struct userfaultfd_wake_range *range)
122686039bd3SAndrea Arcangeli {
1227cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
122886039bd3SAndrea Arcangeli 	/* wake all in the range and autoremove */
122915b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_pending_wqh))
1230ac5be6b4SAndrea Arcangeli 		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
123115b726efSAndrea Arcangeli 				     range);
123215b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_wqh))
1233c430d1e8SMatthew Wilcox 		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1234cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
123586039bd3SAndrea Arcangeli }
123686039bd3SAndrea Arcangeli 
123786039bd3SAndrea Arcangeli static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
123886039bd3SAndrea Arcangeli 					   struct userfaultfd_wake_range *range)
123986039bd3SAndrea Arcangeli {
12402c5b7e1bSAndrea Arcangeli 	unsigned seq;
12412c5b7e1bSAndrea Arcangeli 	bool need_wakeup;
12422c5b7e1bSAndrea Arcangeli 
124386039bd3SAndrea Arcangeli 	/*
124486039bd3SAndrea Arcangeli 	 * To be sure waitqueue_active() is not reordered by the CPU
124586039bd3SAndrea Arcangeli 	 * before the pagetable update, use an explicit SMP memory
124686039bd3SAndrea Arcangeli 	 * barrier here. PT lock release or up_read(mmap_sem) still
124786039bd3SAndrea Arcangeli 	 * have release semantics that can allow the
124886039bd3SAndrea Arcangeli 	 * waitqueue_active() to be reordered before the pte update.
124986039bd3SAndrea Arcangeli 	 */
125086039bd3SAndrea Arcangeli 	smp_mb();
125186039bd3SAndrea Arcangeli 
125286039bd3SAndrea Arcangeli 	/*
125386039bd3SAndrea Arcangeli 	 * Use waitqueue_active() because the address space is very
125486039bd3SAndrea Arcangeli 	 * frequently changed atomically even when there are no
125586039bd3SAndrea Arcangeli 	 * userfaults yet. So we take the spinlock only when we're
125686039bd3SAndrea Arcangeli 	 * sure we have userfaults to wake.
125786039bd3SAndrea Arcangeli 	 */
12582c5b7e1bSAndrea Arcangeli 	do {
12592c5b7e1bSAndrea Arcangeli 		seq = read_seqcount_begin(&ctx->refile_seq);
12602c5b7e1bSAndrea Arcangeli 		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
12612c5b7e1bSAndrea Arcangeli 			waitqueue_active(&ctx->fault_wqh);
12622c5b7e1bSAndrea Arcangeli 		cond_resched();
12632c5b7e1bSAndrea Arcangeli 	} while (read_seqcount_retry(&ctx->refile_seq, seq));
12642c5b7e1bSAndrea Arcangeli 	if (need_wakeup)
126586039bd3SAndrea Arcangeli 		__wake_userfault(ctx, range);
126686039bd3SAndrea Arcangeli }
126786039bd3SAndrea Arcangeli 
126886039bd3SAndrea Arcangeli static __always_inline int validate_range(struct mm_struct *mm,
12697d032574SAndrey Konovalov 					  __u64 *start, __u64 len)
127086039bd3SAndrea Arcangeli {
127186039bd3SAndrea Arcangeli 	__u64 task_size = mm->task_size;
127286039bd3SAndrea Arcangeli 
12737d032574SAndrey Konovalov 	*start = untagged_addr(*start);
12747d032574SAndrey Konovalov 
12757d032574SAndrey Konovalov 	if (*start & ~PAGE_MASK)
127686039bd3SAndrea Arcangeli 		return -EINVAL;
127786039bd3SAndrea Arcangeli 	if (len & ~PAGE_MASK)
127886039bd3SAndrea Arcangeli 		return -EINVAL;
127986039bd3SAndrea Arcangeli 	if (!len)
128086039bd3SAndrea Arcangeli 		return -EINVAL;
12817d032574SAndrey Konovalov 	if (*start < mmap_min_addr)
128286039bd3SAndrea Arcangeli 		return -EINVAL;
12837d032574SAndrey Konovalov 	if (*start >= task_size)
128486039bd3SAndrea Arcangeli 		return -EINVAL;
12857d032574SAndrey Konovalov 	if (len > task_size - *start)
128686039bd3SAndrea Arcangeli 		return -EINVAL;
128786039bd3SAndrea Arcangeli 	return 0;
128886039bd3SAndrea Arcangeli }
128986039bd3SAndrea Arcangeli 
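/*
 * Worked example of the checks above, assuming 4k pages, a 47-bit
 * task_size and mmap_min_addr = 0x10000: start = 0x100000 with
 * len = 0x2000 passes; start = 0x100234 fails the PAGE_MASK check;
 * start = task_size - 0x1000 with len = 0x2000 fails the final
 * "len > task_size - *start" check, which also catches wraparound.
 */
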
1290ba6907dbSMike Rapoport static inline bool vma_can_userfault(struct vm_area_struct *vma)
1291ba6907dbSMike Rapoport {
1292cac67329SMike Rapoport 	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
1293cac67329SMike Rapoport 		vma_is_shmem(vma);
1294ba6907dbSMike Rapoport }
1295ba6907dbSMike Rapoport 
129686039bd3SAndrea Arcangeli static int userfaultfd_register(struct userfaultfd_ctx *ctx,
129786039bd3SAndrea Arcangeli 				unsigned long arg)
129886039bd3SAndrea Arcangeli {
129986039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
130086039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev, *cur;
130186039bd3SAndrea Arcangeli 	int ret;
130286039bd3SAndrea Arcangeli 	struct uffdio_register uffdio_register;
130386039bd3SAndrea Arcangeli 	struct uffdio_register __user *user_uffdio_register;
130486039bd3SAndrea Arcangeli 	unsigned long vm_flags, new_flags;
130586039bd3SAndrea Arcangeli 	bool found;
1306ce53e8e6SMike Rapoport 	bool basic_ioctls;
130786039bd3SAndrea Arcangeli 	unsigned long start, end, vma_end;
130886039bd3SAndrea Arcangeli 
130986039bd3SAndrea Arcangeli 	user_uffdio_register = (struct uffdio_register __user *) arg;
131086039bd3SAndrea Arcangeli 
131186039bd3SAndrea Arcangeli 	ret = -EFAULT;
131286039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_register, user_uffdio_register,
131386039bd3SAndrea Arcangeli 			   sizeof(uffdio_register)-sizeof(__u64)))
131486039bd3SAndrea Arcangeli 		goto out;
131586039bd3SAndrea Arcangeli 
131686039bd3SAndrea Arcangeli 	ret = -EINVAL;
131786039bd3SAndrea Arcangeli 	if (!uffdio_register.mode)
131886039bd3SAndrea Arcangeli 		goto out;
131986039bd3SAndrea Arcangeli 	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
132086039bd3SAndrea Arcangeli 				     UFFDIO_REGISTER_MODE_WP))
132186039bd3SAndrea Arcangeli 		goto out;
132286039bd3SAndrea Arcangeli 	vm_flags = 0;
132386039bd3SAndrea Arcangeli 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
132486039bd3SAndrea Arcangeli 		vm_flags |= VM_UFFD_MISSING;
132586039bd3SAndrea Arcangeli 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
132686039bd3SAndrea Arcangeli 		vm_flags |= VM_UFFD_WP;
132786039bd3SAndrea Arcangeli 		/*
132886039bd3SAndrea Arcangeli 		 * FIXME: remove the below error constraint by
132986039bd3SAndrea Arcangeli 		 * implementing the wprotect tracking mode.
133086039bd3SAndrea Arcangeli 		 */
133186039bd3SAndrea Arcangeli 		ret = -EINVAL;
133286039bd3SAndrea Arcangeli 		goto out;
133386039bd3SAndrea Arcangeli 	}
133486039bd3SAndrea Arcangeli 
13357d032574SAndrey Konovalov 	ret = validate_range(mm, &uffdio_register.range.start,
133686039bd3SAndrea Arcangeli 			     uffdio_register.range.len);
133786039bd3SAndrea Arcangeli 	if (ret)
133886039bd3SAndrea Arcangeli 		goto out;
133986039bd3SAndrea Arcangeli 
134086039bd3SAndrea Arcangeli 	start = uffdio_register.range.start;
134186039bd3SAndrea Arcangeli 	end = start + uffdio_register.range.len;
134286039bd3SAndrea Arcangeli 
1343d2005e3fSOleg Nesterov 	ret = -ENOMEM;
1344d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
1345d2005e3fSOleg Nesterov 		goto out;
1346d2005e3fSOleg Nesterov 
134786039bd3SAndrea Arcangeli 	down_write(&mm->mmap_sem);
134804f5866eSAndrea Arcangeli 	if (!mmget_still_valid(mm))
134904f5866eSAndrea Arcangeli 		goto out_unlock;
135086039bd3SAndrea Arcangeli 	vma = find_vma_prev(mm, start, &prev);
135186039bd3SAndrea Arcangeli 	if (!vma)
135286039bd3SAndrea Arcangeli 		goto out_unlock;
135386039bd3SAndrea Arcangeli 
135486039bd3SAndrea Arcangeli 	/* check that there's at least one vma in the range */
135586039bd3SAndrea Arcangeli 	ret = -EINVAL;
135686039bd3SAndrea Arcangeli 	if (vma->vm_start >= end)
135786039bd3SAndrea Arcangeli 		goto out_unlock;
135886039bd3SAndrea Arcangeli 
135986039bd3SAndrea Arcangeli 	/*
1360cab350afSMike Kravetz 	 * If the first vma contains huge pages, make sure the start
1361cab350afSMike Kravetz 	 * address is aligned to the huge page size.
1362cab350afSMike Kravetz 	 */
1363cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1364cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1365cab350afSMike Kravetz 
1366cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1367cab350afSMike Kravetz 			goto out_unlock;
1368cab350afSMike Kravetz 	}
1369cab350afSMike Kravetz 
1370cab350afSMike Kravetz 	/*
137186039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
137286039bd3SAndrea Arcangeli 	 */
137386039bd3SAndrea Arcangeli 	found = false;
1374ce53e8e6SMike Rapoport 	basic_ioctls = false;
137586039bd3SAndrea Arcangeli 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
137686039bd3SAndrea Arcangeli 		cond_resched();
137786039bd3SAndrea Arcangeli 
137886039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
137986039bd3SAndrea Arcangeli 		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
138086039bd3SAndrea Arcangeli 
138186039bd3SAndrea Arcangeli 		/* check for incompatible vmas */
138286039bd3SAndrea Arcangeli 		ret = -EINVAL;
1383ba6907dbSMike Rapoport 		if (!vma_can_userfault(cur))
138486039bd3SAndrea Arcangeli 			goto out_unlock;
138529ec9066SAndrea Arcangeli 
138629ec9066SAndrea Arcangeli 		/*
138729ec9066SAndrea Arcangeli 		 * UFFDIO_COPY will fill file holes even without
138829ec9066SAndrea Arcangeli 		 * PROT_WRITE. This check enforces that if this is a
138929ec9066SAndrea Arcangeli 		 * MAP_SHARED mapping, the process has write permission
139029ec9066SAndrea Arcangeli 		 * to the backing file. If VM_MAYWRITE is set on a
139129ec9066SAndrea Arcangeli 		 * MAP_SHARED vma, it also guarantees that no
139229ec9066SAndrea Arcangeli 		 * F_SEAL_WRITE seal is present and no further one can
139229ec9066SAndrea Arcangeli 		 * be taken until the vma is destroyed.
139329ec9066SAndrea Arcangeli 		 */
139429ec9066SAndrea Arcangeli 		ret = -EPERM;
139529ec9066SAndrea Arcangeli 		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
139629ec9066SAndrea Arcangeli 			goto out_unlock;
139729ec9066SAndrea Arcangeli 
1398cab350afSMike Kravetz 		/*
1399cab350afSMike Kravetz 		 * If this vma contains the ending address, and it is a
1400cab350afSMike Kravetz 		 * hugepage vma, check the end alignment.
1401cab350afSMike Kravetz 		 */
1402cab350afSMike Kravetz 		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1403cab350afSMike Kravetz 		    end > cur->vm_start) {
1404cab350afSMike Kravetz 			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1405cab350afSMike Kravetz 
1406cab350afSMike Kravetz 			ret = -EINVAL;
1407cab350afSMike Kravetz 
1408cab350afSMike Kravetz 			if (end & (vma_hpagesize - 1))
1409cab350afSMike Kravetz 				goto out_unlock;
1410cab350afSMike Kravetz 		}
141186039bd3SAndrea Arcangeli 
141286039bd3SAndrea Arcangeli 		/*
141386039bd3SAndrea Arcangeli 		 * Check that this vma isn't already owned by a
141486039bd3SAndrea Arcangeli 		 * different userfaultfd. We can't allow more than one
141586039bd3SAndrea Arcangeli 		 * userfaultfd to own a single vma simultaneously or we
141686039bd3SAndrea Arcangeli 		 * wouldn't know which one to deliver the userfaults to.
141786039bd3SAndrea Arcangeli 		 */
141886039bd3SAndrea Arcangeli 		ret = -EBUSY;
141986039bd3SAndrea Arcangeli 		if (cur->vm_userfaultfd_ctx.ctx &&
142086039bd3SAndrea Arcangeli 		    cur->vm_userfaultfd_ctx.ctx != ctx)
142186039bd3SAndrea Arcangeli 			goto out_unlock;
142286039bd3SAndrea Arcangeli 
1423cab350afSMike Kravetz 		/*
1424cab350afSMike Kravetz 		 * Note vmas containing huge pages
1425cab350afSMike Kravetz 		 */
1426ce53e8e6SMike Rapoport 		if (is_vm_hugetlb_page(cur))
1427ce53e8e6SMike Rapoport 			basic_ioctls = true;
1428cab350afSMike Kravetz 
142986039bd3SAndrea Arcangeli 		found = true;
143086039bd3SAndrea Arcangeli 	}
143186039bd3SAndrea Arcangeli 	BUG_ON(!found);
143286039bd3SAndrea Arcangeli 
143386039bd3SAndrea Arcangeli 	if (vma->vm_start < start)
143486039bd3SAndrea Arcangeli 		prev = vma;
143586039bd3SAndrea Arcangeli 
143686039bd3SAndrea Arcangeli 	ret = 0;
143786039bd3SAndrea Arcangeli 	do {
143886039bd3SAndrea Arcangeli 		cond_resched();
143986039bd3SAndrea Arcangeli 
1440ba6907dbSMike Rapoport 		BUG_ON(!vma_can_userfault(vma));
144186039bd3SAndrea Arcangeli 		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
144286039bd3SAndrea Arcangeli 		       vma->vm_userfaultfd_ctx.ctx != ctx);
144329ec9066SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
144486039bd3SAndrea Arcangeli 
144586039bd3SAndrea Arcangeli 		/*
144686039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is already registered with this
144786039bd3SAndrea Arcangeli 		 * userfaultfd, and with the right tracking mode too.
144886039bd3SAndrea Arcangeli 		 */
144986039bd3SAndrea Arcangeli 		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
145086039bd3SAndrea Arcangeli 		    (vma->vm_flags & vm_flags) == vm_flags)
145186039bd3SAndrea Arcangeli 			goto skip;
145286039bd3SAndrea Arcangeli 
145386039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
145486039bd3SAndrea Arcangeli 			start = vma->vm_start;
145586039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
145686039bd3SAndrea Arcangeli 
14579d4678ebSAndrea Arcangeli 		new_flags = (vma->vm_flags &
14589d4678ebSAndrea Arcangeli 			     ~(VM_UFFD_MISSING|VM_UFFD_WP)) | vm_flags;
145986039bd3SAndrea Arcangeli 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
146086039bd3SAndrea Arcangeli 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
146186039bd3SAndrea Arcangeli 				 vma_policy(vma),
146286039bd3SAndrea Arcangeli 				 ((struct vm_userfaultfd_ctx){ ctx }));
146386039bd3SAndrea Arcangeli 		if (prev) {
146486039bd3SAndrea Arcangeli 			vma = prev;
146586039bd3SAndrea Arcangeli 			goto next;
146686039bd3SAndrea Arcangeli 		}
146786039bd3SAndrea Arcangeli 		if (vma->vm_start < start) {
146886039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, start, 1);
146986039bd3SAndrea Arcangeli 			if (ret)
147086039bd3SAndrea Arcangeli 				break;
147186039bd3SAndrea Arcangeli 		}
147286039bd3SAndrea Arcangeli 		if (vma->vm_end > end) {
147386039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, end, 0);
147486039bd3SAndrea Arcangeli 			if (ret)
147586039bd3SAndrea Arcangeli 				break;
147686039bd3SAndrea Arcangeli 		}
147786039bd3SAndrea Arcangeli 	next:
147886039bd3SAndrea Arcangeli 		/*
147986039bd3SAndrea Arcangeli 		 * In the vma_merge() successful mprotect-like case 8:
148086039bd3SAndrea Arcangeli 		 * the next vma was merged into the current one and
148186039bd3SAndrea Arcangeli 		 * the current one has not been updated yet.
148286039bd3SAndrea Arcangeli 		 */
148386039bd3SAndrea Arcangeli 		vma->vm_flags = new_flags;
148486039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx.ctx = ctx;
148586039bd3SAndrea Arcangeli 
148686039bd3SAndrea Arcangeli 	skip:
148786039bd3SAndrea Arcangeli 		prev = vma;
148886039bd3SAndrea Arcangeli 		start = vma->vm_end;
148986039bd3SAndrea Arcangeli 		vma = vma->vm_next;
149086039bd3SAndrea Arcangeli 	} while (vma && vma->vm_start < end);
149186039bd3SAndrea Arcangeli out_unlock:
149286039bd3SAndrea Arcangeli 	up_write(&mm->mmap_sem);
1493d2005e3fSOleg Nesterov 	mmput(mm);
149486039bd3SAndrea Arcangeli 	if (!ret) {
149586039bd3SAndrea Arcangeli 		/*
149686039bd3SAndrea Arcangeli 		 * Now that we scanned all vmas we can already tell
149786039bd3SAndrea Arcangeli 		 * userland which ioctl methods are guaranteed to
149886039bd3SAndrea Arcangeli 		 * succeed on this range.
149986039bd3SAndrea Arcangeli 		 */
1500ce53e8e6SMike Rapoport 		if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
1501cab350afSMike Kravetz 			     UFFD_API_RANGE_IOCTLS,
150286039bd3SAndrea Arcangeli 			     &user_uffdio_register->ioctls))
150386039bd3SAndrea Arcangeli 			ret = -EFAULT;
150486039bd3SAndrea Arcangeli 	}
150586039bd3SAndrea Arcangeli out:
150686039bd3SAndrea Arcangeli 	return ret;
150786039bd3SAndrea Arcangeli }
150886039bd3SAndrea Arcangeli 
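/*
 * Illustrative userland counterpart of the handler above (a sketch
 * under assumptions, not authoritative): "addr"/"len" are assumed to
 * describe a page-aligned anonymous mapping, and the returned
 * ->ioctls mask is checked rather than assumed, since hugetlbfs
 * ranges only report the basic ioctl set. Error handling via err(3).
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		err(1, "UFFDIO_REGISTER");
 *	if (!(reg.ioctls & ((__u64)1 << _UFFDIO_COPY)))
 *		errx(1, "UFFDIO_COPY not supported on this range");
 */
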
150986039bd3SAndrea Arcangeli static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
151086039bd3SAndrea Arcangeli 				  unsigned long arg)
151186039bd3SAndrea Arcangeli {
151286039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
151386039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev, *cur;
151486039bd3SAndrea Arcangeli 	int ret;
151586039bd3SAndrea Arcangeli 	struct uffdio_range uffdio_unregister;
151686039bd3SAndrea Arcangeli 	unsigned long new_flags;
151786039bd3SAndrea Arcangeli 	bool found;
151886039bd3SAndrea Arcangeli 	unsigned long start, end, vma_end;
151986039bd3SAndrea Arcangeli 	const void __user *buf = (void __user *)arg;
152086039bd3SAndrea Arcangeli 
152186039bd3SAndrea Arcangeli 	ret = -EFAULT;
152286039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
152386039bd3SAndrea Arcangeli 		goto out;
152486039bd3SAndrea Arcangeli 
15257d032574SAndrey Konovalov 	ret = validate_range(mm, &uffdio_unregister.start,
152686039bd3SAndrea Arcangeli 			     uffdio_unregister.len);
152786039bd3SAndrea Arcangeli 	if (ret)
152886039bd3SAndrea Arcangeli 		goto out;
152986039bd3SAndrea Arcangeli 
153086039bd3SAndrea Arcangeli 	start = uffdio_unregister.start;
153186039bd3SAndrea Arcangeli 	end = start + uffdio_unregister.len;
153286039bd3SAndrea Arcangeli 
1533d2005e3fSOleg Nesterov 	ret = -ENOMEM;
1534d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
1535d2005e3fSOleg Nesterov 		goto out;
1536d2005e3fSOleg Nesterov 
153786039bd3SAndrea Arcangeli 	down_write(&mm->mmap_sem);
153804f5866eSAndrea Arcangeli 	if (!mmget_still_valid(mm))
153904f5866eSAndrea Arcangeli 		goto out_unlock;
154086039bd3SAndrea Arcangeli 	vma = find_vma_prev(mm, start, &prev);
154186039bd3SAndrea Arcangeli 	if (!vma)
154286039bd3SAndrea Arcangeli 		goto out_unlock;
154386039bd3SAndrea Arcangeli 
154486039bd3SAndrea Arcangeli 	/* check that there's at least one vma in the range */
154586039bd3SAndrea Arcangeli 	ret = -EINVAL;
154686039bd3SAndrea Arcangeli 	if (vma->vm_start >= end)
154786039bd3SAndrea Arcangeli 		goto out_unlock;
154886039bd3SAndrea Arcangeli 
154986039bd3SAndrea Arcangeli 	/*
1550cab350afSMike Kravetz 	 * If the first vma contains huge pages, make sure the start
1551cab350afSMike Kravetz 	 * address is aligned to the huge page size.
1552cab350afSMike Kravetz 	 */
1553cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1554cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1555cab350afSMike Kravetz 
1556cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1557cab350afSMike Kravetz 			goto out_unlock;
1558cab350afSMike Kravetz 	}
1559cab350afSMike Kravetz 
1560cab350afSMike Kravetz 	/*
156186039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
156286039bd3SAndrea Arcangeli 	 */
156386039bd3SAndrea Arcangeli 	found = false;
156486039bd3SAndrea Arcangeli 	ret = -EINVAL;
156586039bd3SAndrea Arcangeli 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
156686039bd3SAndrea Arcangeli 		cond_resched();
156786039bd3SAndrea Arcangeli 
156886039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
156986039bd3SAndrea Arcangeli 		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
157086039bd3SAndrea Arcangeli 
157186039bd3SAndrea Arcangeli 		/*
157286039bd3SAndrea Arcangeli 		 * Check for incompatible vmas. Not strictly required
157386039bd3SAndrea Arcangeli 		 * here, as incompatible vmas cannot have a
157486039bd3SAndrea Arcangeli 		 * userfaultfd_ctx registered on them, but this
157586039bd3SAndrea Arcangeli 		 * provides stricter behavior so that unregistration
157686039bd3SAndrea Arcangeli 		 * errors are noticed.
157786039bd3SAndrea Arcangeli 		 */
1578ba6907dbSMike Rapoport 		if (!vma_can_userfault(cur))
157986039bd3SAndrea Arcangeli 			goto out_unlock;
158086039bd3SAndrea Arcangeli 
158186039bd3SAndrea Arcangeli 		found = true;
158286039bd3SAndrea Arcangeli 	}
158386039bd3SAndrea Arcangeli 	BUG_ON(!found);
158486039bd3SAndrea Arcangeli 
158586039bd3SAndrea Arcangeli 	if (vma->vm_start < start)
158686039bd3SAndrea Arcangeli 		prev = vma;
158786039bd3SAndrea Arcangeli 
158886039bd3SAndrea Arcangeli 	ret = 0;
158986039bd3SAndrea Arcangeli 	do {
159086039bd3SAndrea Arcangeli 		cond_resched();
159186039bd3SAndrea Arcangeli 
1592ba6907dbSMike Rapoport 		BUG_ON(!vma_can_userfault(vma));
159386039bd3SAndrea Arcangeli 
159486039bd3SAndrea Arcangeli 		/*
159586039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is not registered with any
159686039bd3SAndrea Arcangeli 		 * userfaultfd, so there is nothing to unregister.
159786039bd3SAndrea Arcangeli 		 */
159886039bd3SAndrea Arcangeli 		if (!vma->vm_userfaultfd_ctx.ctx)
159986039bd3SAndrea Arcangeli 			goto skip;
160086039bd3SAndrea Arcangeli 
160101e881f5SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
160201e881f5SAndrea Arcangeli 
160386039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
160486039bd3SAndrea Arcangeli 			start = vma->vm_start;
160586039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
160686039bd3SAndrea Arcangeli 
160709fa5296SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
160809fa5296SAndrea Arcangeli 			/*
160909fa5296SAndrea Arcangeli 			 * Wake any concurrent pending userfault while
161009fa5296SAndrea Arcangeli 			 * we unregister, so they will not hang
161109fa5296SAndrea Arcangeli 			 * permanently, and it spares userland an
161209fa5296SAndrea Arcangeli 			 * explicit UFFDIO_WAKE call.
161309fa5296SAndrea Arcangeli 			 */
161409fa5296SAndrea Arcangeli 			struct userfaultfd_wake_range range;
161509fa5296SAndrea Arcangeli 			range.start = start;
161609fa5296SAndrea Arcangeli 			range.len = vma_end - start;
161709fa5296SAndrea Arcangeli 			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
161809fa5296SAndrea Arcangeli 		}
161909fa5296SAndrea Arcangeli 
162086039bd3SAndrea Arcangeli 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
162186039bd3SAndrea Arcangeli 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
162286039bd3SAndrea Arcangeli 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
162386039bd3SAndrea Arcangeli 				 vma_policy(vma),
162486039bd3SAndrea Arcangeli 				 NULL_VM_UFFD_CTX);
162586039bd3SAndrea Arcangeli 		if (prev) {
162686039bd3SAndrea Arcangeli 			vma = prev;
162786039bd3SAndrea Arcangeli 			goto next;
162886039bd3SAndrea Arcangeli 		}
162986039bd3SAndrea Arcangeli 		if (vma->vm_start < start) {
163086039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, start, 1);
163186039bd3SAndrea Arcangeli 			if (ret)
163286039bd3SAndrea Arcangeli 				break;
163386039bd3SAndrea Arcangeli 		}
163486039bd3SAndrea Arcangeli 		if (vma->vm_end > end) {
163586039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, end, 0);
163686039bd3SAndrea Arcangeli 			if (ret)
163786039bd3SAndrea Arcangeli 				break;
163886039bd3SAndrea Arcangeli 		}
163986039bd3SAndrea Arcangeli 	next:
164086039bd3SAndrea Arcangeli 		/*
164186039bd3SAndrea Arcangeli 		 * In the vma_merge() successful mprotect-like case 8:
164286039bd3SAndrea Arcangeli 		 * the next vma was merged into the current one and
164386039bd3SAndrea Arcangeli 		 * the current one has not been updated yet.
164486039bd3SAndrea Arcangeli 		 */
164586039bd3SAndrea Arcangeli 		vma->vm_flags = new_flags;
164686039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
164786039bd3SAndrea Arcangeli 
164886039bd3SAndrea Arcangeli 	skip:
164986039bd3SAndrea Arcangeli 		prev = vma;
165086039bd3SAndrea Arcangeli 		start = vma->vm_end;
165186039bd3SAndrea Arcangeli 		vma = vma->vm_next;
165286039bd3SAndrea Arcangeli 	} while (vma && vma->vm_start < end);
165386039bd3SAndrea Arcangeli out_unlock:
165486039bd3SAndrea Arcangeli 	up_write(&mm->mmap_sem);
1655d2005e3fSOleg Nesterov 	mmput(mm);
165686039bd3SAndrea Arcangeli out:
165786039bd3SAndrea Arcangeli 	return ret;
165886039bd3SAndrea Arcangeli }
165986039bd3SAndrea Arcangeli 
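/*
 * Userland counterpart sketch (illustrative only): unregistering
 * takes a bare uffdio_range rather than a uffdio_register, and any
 * userfault still pending on the range is woken by the code above,
 * so the faulting threads retry instead of hanging.
 *
 *	struct uffdio_range rng = {
 *		.start = (unsigned long)addr,
 *		.len = len,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_UNREGISTER, &rng) == -1)
 *		err(1, "UFFDIO_UNREGISTER");
 */
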
166086039bd3SAndrea Arcangeli /*
1661ba85c702SAndrea Arcangeli  * userfaultfd_wake may be used in combination with the
1662ba85c702SAndrea Arcangeli  * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
166386039bd3SAndrea Arcangeli  */
166486039bd3SAndrea Arcangeli static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
166586039bd3SAndrea Arcangeli 			    unsigned long arg)
166686039bd3SAndrea Arcangeli {
166786039bd3SAndrea Arcangeli 	int ret;
166886039bd3SAndrea Arcangeli 	struct uffdio_range uffdio_wake;
166986039bd3SAndrea Arcangeli 	struct userfaultfd_wake_range range;
167086039bd3SAndrea Arcangeli 	const void __user *buf = (void __user *)arg;
167186039bd3SAndrea Arcangeli 
167286039bd3SAndrea Arcangeli 	ret = -EFAULT;
167386039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
167486039bd3SAndrea Arcangeli 		goto out;
167586039bd3SAndrea Arcangeli 
16767d032574SAndrey Konovalov 	ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
167786039bd3SAndrea Arcangeli 	if (ret)
167886039bd3SAndrea Arcangeli 		goto out;
167986039bd3SAndrea Arcangeli 
168086039bd3SAndrea Arcangeli 	range.start = uffdio_wake.start;
168186039bd3SAndrea Arcangeli 	range.len = uffdio_wake.len;
168286039bd3SAndrea Arcangeli 
168386039bd3SAndrea Arcangeli 	/*
168486039bd3SAndrea Arcangeli 	 * len == 0 means wake all and we don't want to wake all here,
168586039bd3SAndrea Arcangeli 	 * so check it again to be sure.
168686039bd3SAndrea Arcangeli 	 */
168786039bd3SAndrea Arcangeli 	VM_BUG_ON(!range.len);
168886039bd3SAndrea Arcangeli 
168986039bd3SAndrea Arcangeli 	wake_userfault(ctx, &range);
169086039bd3SAndrea Arcangeli 	ret = 0;
169186039bd3SAndrea Arcangeli 
169286039bd3SAndrea Arcangeli out:
169386039bd3SAndrea Arcangeli 	return ret;
169486039bd3SAndrea Arcangeli }
169586039bd3SAndrea Arcangeli 
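/*
 * Hedged batching sketch of the DONTWAKE combination mentioned above:
 * resolve several faults with UFFDIO_COPY_MODE_DONTWAKE (or the
 * zeropage equivalent), then issue a single UFFDIO_WAKE spanning all
 * of them. "addr"/"len" are assumed to cover the already-resolved,
 * page-aligned range.
 *
 *	struct uffdio_range rng = {
 *		.start = (unsigned long)addr,
 *		.len = len,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_WAKE, &rng) == -1)
 *		err(1, "UFFDIO_WAKE");
 */
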
1696ad465caeSAndrea Arcangeli static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1697ad465caeSAndrea Arcangeli 			    unsigned long arg)
1698ad465caeSAndrea Arcangeli {
1699ad465caeSAndrea Arcangeli 	__s64 ret;
1700ad465caeSAndrea Arcangeli 	struct uffdio_copy uffdio_copy;
1701ad465caeSAndrea Arcangeli 	struct uffdio_copy __user *user_uffdio_copy;
1702ad465caeSAndrea Arcangeli 	struct userfaultfd_wake_range range;
1703ad465caeSAndrea Arcangeli 
1704ad465caeSAndrea Arcangeli 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
1705ad465caeSAndrea Arcangeli 
1706df2cc96eSMike Rapoport 	ret = -EAGAIN;
1707df2cc96eSMike Rapoport 	if (READ_ONCE(ctx->mmap_changing))
1708df2cc96eSMike Rapoport 		goto out;
1709df2cc96eSMike Rapoport 
1710ad465caeSAndrea Arcangeli 	ret = -EFAULT;
1711ad465caeSAndrea Arcangeli 	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1712ad465caeSAndrea Arcangeli 			   /* don't copy "copy" last field */
1713ad465caeSAndrea Arcangeli 			   sizeof(uffdio_copy)-sizeof(__s64)))
1714ad465caeSAndrea Arcangeli 		goto out;
1715ad465caeSAndrea Arcangeli 
17167d032574SAndrey Konovalov 	ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
1717ad465caeSAndrea Arcangeli 	if (ret)
1718ad465caeSAndrea Arcangeli 		goto out;
1719ad465caeSAndrea Arcangeli 	/*
1720ad465caeSAndrea Arcangeli 	 * Double-check for wraparound just in case; copy_from_user()
1721ad465caeSAndrea Arcangeli 	 * will later verify that uffdio_copy.src + uffdio_copy.len
1722ad465caeSAndrea Arcangeli 	 * fits in the userland range.
1723ad465caeSAndrea Arcangeli 	 */
1724ad465caeSAndrea Arcangeli 	ret = -EINVAL;
1725ad465caeSAndrea Arcangeli 	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
1726ad465caeSAndrea Arcangeli 		goto out;
1727*72981e0eSAndrea Arcangeli 	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
1728ad465caeSAndrea Arcangeli 		goto out;
1729d2005e3fSOleg Nesterov 	if (mmget_not_zero(ctx->mm)) {
1730ad465caeSAndrea Arcangeli 		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
1731*72981e0eSAndrea Arcangeli 				   uffdio_copy.len, &ctx->mmap_changing,
1732*72981e0eSAndrea Arcangeli 				   uffdio_copy.mode);
1733d2005e3fSOleg Nesterov 		mmput(ctx->mm);
173496333187SMike Rapoport 	} else {
1735e86b298bSMike Rapoport 		return -ESRCH;
1736d2005e3fSOleg Nesterov 	}
1737ad465caeSAndrea Arcangeli 	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1738ad465caeSAndrea Arcangeli 		return -EFAULT;
1739ad465caeSAndrea Arcangeli 	if (ret < 0)
1740ad465caeSAndrea Arcangeli 		goto out;
1741ad465caeSAndrea Arcangeli 	BUG_ON(!ret);
1742ad465caeSAndrea Arcangeli 	/* len == 0 would wake all */
1743ad465caeSAndrea Arcangeli 	range.len = ret;
1744ad465caeSAndrea Arcangeli 	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1745ad465caeSAndrea Arcangeli 		range.start = uffdio_copy.dst;
1746ad465caeSAndrea Arcangeli 		wake_userfault(ctx, &range);
1747ad465caeSAndrea Arcangeli 	}
1748ad465caeSAndrea Arcangeli 	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1749ad465caeSAndrea Arcangeli out:
1750ad465caeSAndrea Arcangeli 	return ret;
1751ad465caeSAndrea Arcangeli }
1752ad465caeSAndrea Arcangeli 
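/*
 * Typical userland resolution of a missing fault through the handler
 * above (a sketch under assumptions: "page" holds the payload,
 * "fault_addr" comes from a UFFD_EVENT_PAGEFAULT message, and
 * page_size is the system page size). On EAGAIN, ->copy reports how
 * much was installed and the caller is expected to re-read pending
 * events (e.g. after a fork() raised mmap_changing) before retrying.
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)page,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno != EAGAIN)
 *		err(1, "UFFDIO_COPY");
 */
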
1753ad465caeSAndrea Arcangeli static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1754ad465caeSAndrea Arcangeli 				unsigned long arg)
1755ad465caeSAndrea Arcangeli {
1756ad465caeSAndrea Arcangeli 	__s64 ret;
1757ad465caeSAndrea Arcangeli 	struct uffdio_zeropage uffdio_zeropage;
1758ad465caeSAndrea Arcangeli 	struct uffdio_zeropage __user *user_uffdio_zeropage;
1759ad465caeSAndrea Arcangeli 	struct userfaultfd_wake_range range;
1760ad465caeSAndrea Arcangeli 
1761ad465caeSAndrea Arcangeli 	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1762ad465caeSAndrea Arcangeli 
1763df2cc96eSMike Rapoport 	ret = -EAGAIN;
1764df2cc96eSMike Rapoport 	if (READ_ONCE(ctx->mmap_changing))
1765df2cc96eSMike Rapoport 		goto out;
1766df2cc96eSMike Rapoport 
1767ad465caeSAndrea Arcangeli 	ret = -EFAULT;
1768ad465caeSAndrea Arcangeli 	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1769ad465caeSAndrea Arcangeli 			   /* don't copy "zeropage" last field */
1770ad465caeSAndrea Arcangeli 			   sizeof(uffdio_zeropage)-sizeof(__s64)))
1771ad465caeSAndrea Arcangeli 		goto out;
1772ad465caeSAndrea Arcangeli 
17737d032574SAndrey Konovalov 	ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
1774ad465caeSAndrea Arcangeli 			     uffdio_zeropage.range.len);
1775ad465caeSAndrea Arcangeli 	if (ret)
1776ad465caeSAndrea Arcangeli 		goto out;
1777ad465caeSAndrea Arcangeli 	ret = -EINVAL;
1778ad465caeSAndrea Arcangeli 	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1779ad465caeSAndrea Arcangeli 		goto out;
1780ad465caeSAndrea Arcangeli 
1781d2005e3fSOleg Nesterov 	if (mmget_not_zero(ctx->mm)) {
1782ad465caeSAndrea Arcangeli 		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
1783df2cc96eSMike Rapoport 				     uffdio_zeropage.range.len,
1784df2cc96eSMike Rapoport 				     &ctx->mmap_changing);
1785d2005e3fSOleg Nesterov 		mmput(ctx->mm);
17869d95aa4bSMike Rapoport 	} else {
1787e86b298bSMike Rapoport 		return -ESRCH;
1788d2005e3fSOleg Nesterov 	}
1789ad465caeSAndrea Arcangeli 	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1790ad465caeSAndrea Arcangeli 		return -EFAULT;
1791ad465caeSAndrea Arcangeli 	if (ret < 0)
1792ad465caeSAndrea Arcangeli 		goto out;
1793ad465caeSAndrea Arcangeli 	/* len == 0 would wake all */
1794ad465caeSAndrea Arcangeli 	BUG_ON(!ret);
1795ad465caeSAndrea Arcangeli 	range.len = ret;
1796ad465caeSAndrea Arcangeli 	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1797ad465caeSAndrea Arcangeli 		range.start = uffdio_zeropage.range.start;
1798ad465caeSAndrea Arcangeli 		wake_userfault(ctx, &range);
1799ad465caeSAndrea Arcangeli 	}
1800ad465caeSAndrea Arcangeli 	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1801ad465caeSAndrea Arcangeli out:
1802ad465caeSAndrea Arcangeli 	return ret;
1803ad465caeSAndrea Arcangeli }
1804ad465caeSAndrea Arcangeli 
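/*
 * Same pattern for zero-filling (illustrative sketch, same assumed
 * names as the UFFDIO_COPY example): UFFDIO_ZEROPAGE resolves a
 * missing fault without staging a source page, which is cheaper when
 * the contents are known to be zero.
 *
 *	struct uffdio_zeropage zp = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len = page_size },
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *		err(1, "UFFDIO_ZEROPAGE");
 */
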
18059cd75c3cSPavel Emelyanov static inline unsigned int uffd_ctx_features(__u64 user_features)
18069cd75c3cSPavel Emelyanov {
18079cd75c3cSPavel Emelyanov 	/*
18089cd75c3cSPavel Emelyanov 	 * For the current set of features the bits just coincide
18099cd75c3cSPavel Emelyanov 	 */
18109cd75c3cSPavel Emelyanov 	return (unsigned int)user_features;
18119cd75c3cSPavel Emelyanov }
18129cd75c3cSPavel Emelyanov 
181386039bd3SAndrea Arcangeli /*
181486039bd3SAndrea Arcangeli  * userland asks for a certain API version and we return which bits
181586039bd3SAndrea Arcangeli  * and ioctl commands are implemented in this kernel for that API
181686039bd3SAndrea Arcangeli  * version, or -EINVAL if it is unknown.
181786039bd3SAndrea Arcangeli  */
181886039bd3SAndrea Arcangeli static int userfaultfd_api(struct userfaultfd_ctx *ctx,
181986039bd3SAndrea Arcangeli 			   unsigned long arg)
182086039bd3SAndrea Arcangeli {
182186039bd3SAndrea Arcangeli 	struct uffdio_api uffdio_api;
182286039bd3SAndrea Arcangeli 	void __user *buf = (void __user *)arg;
182386039bd3SAndrea Arcangeli 	int ret;
182465603144SAndrea Arcangeli 	__u64 features;
182586039bd3SAndrea Arcangeli 
182686039bd3SAndrea Arcangeli 	ret = -EINVAL;
182786039bd3SAndrea Arcangeli 	if (ctx->state != UFFD_STATE_WAIT_API)
182886039bd3SAndrea Arcangeli 		goto out;
182986039bd3SAndrea Arcangeli 	ret = -EFAULT;
1830a9b85f94SAndrea Arcangeli 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
183186039bd3SAndrea Arcangeli 		goto out;
183265603144SAndrea Arcangeli 	features = uffdio_api.features;
183386039bd3SAndrea Arcangeli 	ret = -EINVAL;
18343c1c24d9SMike Rapoport 	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
18353c1c24d9SMike Rapoport 		goto err_out;
18363c1c24d9SMike Rapoport 	ret = -EPERM;
18373c1c24d9SMike Rapoport 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
18383c1c24d9SMike Rapoport 		goto err_out;
183965603144SAndrea Arcangeli 	/* report all available features and ioctls to userland */
184065603144SAndrea Arcangeli 	uffdio_api.features = UFFD_API_FEATURES;
184186039bd3SAndrea Arcangeli 	uffdio_api.ioctls = UFFD_API_IOCTLS;
184286039bd3SAndrea Arcangeli 	ret = -EFAULT;
184386039bd3SAndrea Arcangeli 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
184486039bd3SAndrea Arcangeli 		goto out;
184586039bd3SAndrea Arcangeli 	ctx->state = UFFD_STATE_RUNNING;
184665603144SAndrea Arcangeli 	/* only enable the requested features for this uffd context */
184765603144SAndrea Arcangeli 	ctx->features = uffd_ctx_features(features);
184886039bd3SAndrea Arcangeli 	ret = 0;
184986039bd3SAndrea Arcangeli out:
185086039bd3SAndrea Arcangeli 	return ret;
18513c1c24d9SMike Rapoport err_out:
18523c1c24d9SMike Rapoport 	memset(&uffdio_api, 0, sizeof(uffdio_api));
18533c1c24d9SMike Rapoport 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
18543c1c24d9SMike Rapoport 		ret = -EFAULT;
18553c1c24d9SMike Rapoport 	goto out;
185686039bd3SAndrea Arcangeli }
185786039bd3SAndrea Arcangeli 
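/*
 * The handshake above, seen from userland (sketch only): ->api must
 * be UFFD_API, ->features on input is the set the caller wants
 * enabled, and on return it holds everything this kernel supports,
 * so capabilities should be tested rather than assumed.
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	if (api.features & UFFD_FEATURE_EVENT_FORK)
 *		;	// fork events could be requested on a new uffd
 */
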
185886039bd3SAndrea Arcangeli static long userfaultfd_ioctl(struct file *file, unsigned cmd,
185986039bd3SAndrea Arcangeli 			      unsigned long arg)
186086039bd3SAndrea Arcangeli {
186186039bd3SAndrea Arcangeli 	int ret = -EINVAL;
186286039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
186386039bd3SAndrea Arcangeli 
1864e6485a47SAndrea Arcangeli 	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
1865e6485a47SAndrea Arcangeli 		return -EINVAL;
1866e6485a47SAndrea Arcangeli 
186786039bd3SAndrea Arcangeli 	switch(cmd) {
186886039bd3SAndrea Arcangeli 	case UFFDIO_API:
186986039bd3SAndrea Arcangeli 		ret = userfaultfd_api(ctx, arg);
187086039bd3SAndrea Arcangeli 		break;
187186039bd3SAndrea Arcangeli 	case UFFDIO_REGISTER:
187286039bd3SAndrea Arcangeli 		ret = userfaultfd_register(ctx, arg);
187386039bd3SAndrea Arcangeli 		break;
187486039bd3SAndrea Arcangeli 	case UFFDIO_UNREGISTER:
187586039bd3SAndrea Arcangeli 		ret = userfaultfd_unregister(ctx, arg);
187686039bd3SAndrea Arcangeli 		break;
187786039bd3SAndrea Arcangeli 	case UFFDIO_WAKE:
187886039bd3SAndrea Arcangeli 		ret = userfaultfd_wake(ctx, arg);
187986039bd3SAndrea Arcangeli 		break;
1880ad465caeSAndrea Arcangeli 	case UFFDIO_COPY:
1881ad465caeSAndrea Arcangeli 		ret = userfaultfd_copy(ctx, arg);
1882ad465caeSAndrea Arcangeli 		break;
1883ad465caeSAndrea Arcangeli 	case UFFDIO_ZEROPAGE:
1884ad465caeSAndrea Arcangeli 		ret = userfaultfd_zeropage(ctx, arg);
1885ad465caeSAndrea Arcangeli 		break;
188686039bd3SAndrea Arcangeli 	}
188786039bd3SAndrea Arcangeli 	return ret;
188886039bd3SAndrea Arcangeli }
188986039bd3SAndrea Arcangeli 
189086039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS
189186039bd3SAndrea Arcangeli static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
189286039bd3SAndrea Arcangeli {
189386039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = f->private_data;
1894ac6424b9SIngo Molnar 	wait_queue_entry_t *wq;
189586039bd3SAndrea Arcangeli 	unsigned long pending = 0, total = 0;
189686039bd3SAndrea Arcangeli 
1897cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
18982055da97SIngo Molnar 	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
189986039bd3SAndrea Arcangeli 		pending++;
190086039bd3SAndrea Arcangeli 		total++;
190186039bd3SAndrea Arcangeli 	}
19022055da97SIngo Molnar 	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
190315b726efSAndrea Arcangeli 		total++;
190415b726efSAndrea Arcangeli 	}
1905cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
190686039bd3SAndrea Arcangeli 
190786039bd3SAndrea Arcangeli 	/*
190886039bd3SAndrea Arcangeli 	 * If more protocols are added, they will all be shown
190986039bd3SAndrea Arcangeli 	 * separated by a space, like this:
191086039bd3SAndrea Arcangeli 	 *	protocols: aa:... bb:...
191186039bd3SAndrea Arcangeli 	 */
191286039bd3SAndrea Arcangeli 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
1913045098e9SMike Rapoport 		   pending, total, UFFD_API, ctx->features,
191486039bd3SAndrea Arcangeli 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
191586039bd3SAndrea Arcangeli }
191686039bd3SAndrea Arcangeli #endif
191786039bd3SAndrea Arcangeli 
191886039bd3SAndrea Arcangeli static const struct file_operations userfaultfd_fops = {
191986039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS
192086039bd3SAndrea Arcangeli 	.show_fdinfo	= userfaultfd_show_fdinfo,
192186039bd3SAndrea Arcangeli #endif
192286039bd3SAndrea Arcangeli 	.release	= userfaultfd_release,
192386039bd3SAndrea Arcangeli 	.poll		= userfaultfd_poll,
192486039bd3SAndrea Arcangeli 	.read		= userfaultfd_read,
192586039bd3SAndrea Arcangeli 	.unlocked_ioctl = userfaultfd_ioctl,
19261832f2d8SArnd Bergmann 	.compat_ioctl	= compat_ptr_ioctl,
192786039bd3SAndrea Arcangeli 	.llseek		= noop_llseek,
192886039bd3SAndrea Arcangeli };
192986039bd3SAndrea Arcangeli 
19303004ec9cSAndrea Arcangeli static void init_once_userfaultfd_ctx(void *mem)
19313004ec9cSAndrea Arcangeli {
19323004ec9cSAndrea Arcangeli 	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
19333004ec9cSAndrea Arcangeli 
19343004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fault_pending_wqh);
19353004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fault_wqh);
19369cd75c3cSPavel Emelyanov 	init_waitqueue_head(&ctx->event_wqh);
19373004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fd_wqh);
19382c5b7e1bSAndrea Arcangeli 	seqcount_init(&ctx->refile_seq);
19393004ec9cSAndrea Arcangeli }
19403004ec9cSAndrea Arcangeli 
1941284cd241SEric Biggers SYSCALL_DEFINE1(userfaultfd, int, flags)
194286039bd3SAndrea Arcangeli {
194386039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx;
1944284cd241SEric Biggers 	int fd;
194586039bd3SAndrea Arcangeli 
1946cefdca0aSPeter Xu 	if (!sysctl_unprivileged_userfaultfd && !capable(CAP_SYS_PTRACE))
1947cefdca0aSPeter Xu 		return -EPERM;
1948cefdca0aSPeter Xu 
194986039bd3SAndrea Arcangeli 	BUG_ON(!current->mm);
195086039bd3SAndrea Arcangeli 
195186039bd3SAndrea Arcangeli 	/* Check the UFFD_* constants for consistency.  */
195286039bd3SAndrea Arcangeli 	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
195386039bd3SAndrea Arcangeli 	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
195486039bd3SAndrea Arcangeli 
195586039bd3SAndrea Arcangeli 	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
1956284cd241SEric Biggers 		return -EINVAL;
195786039bd3SAndrea Arcangeli 
19583004ec9cSAndrea Arcangeli 	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
195986039bd3SAndrea Arcangeli 	if (!ctx)
1960284cd241SEric Biggers 		return -ENOMEM;
196186039bd3SAndrea Arcangeli 
1962ca880420SEric Biggers 	refcount_set(&ctx->refcount, 1);
196386039bd3SAndrea Arcangeli 	ctx->flags = flags;
19649cd75c3cSPavel Emelyanov 	ctx->features = 0;
196586039bd3SAndrea Arcangeli 	ctx->state = UFFD_STATE_WAIT_API;
196686039bd3SAndrea Arcangeli 	ctx->released = false;
1967df2cc96eSMike Rapoport 	ctx->mmap_changing = false;
196886039bd3SAndrea Arcangeli 	ctx->mm = current->mm;
196986039bd3SAndrea Arcangeli 	/* prevent the mm struct from being freed */
1970f1f10076SVegard Nossum 	mmgrab(ctx->mm);
197186039bd3SAndrea Arcangeli 
1972284cd241SEric Biggers 	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
197386039bd3SAndrea Arcangeli 			      O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
1974284cd241SEric Biggers 	if (fd < 0) {
1975d2005e3fSOleg Nesterov 		mmdrop(ctx->mm);
19763004ec9cSAndrea Arcangeli 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
1977c03e946fSEric Biggers 	}
197886039bd3SAndrea Arcangeli 	return fd;
197986039bd3SAndrea Arcangeli }
19803004ec9cSAndrea Arcangeli 
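/*
 * Creating a uffd from userland (illustrative; raw syscall(2) is
 * shown because a libc wrapper may be absent). The flag names rely
 * on the BUILD_BUG_ON()s above: UFFD_CLOEXEC and UFFD_NONBLOCK are
 * exactly O_CLOEXEC and O_NONBLOCK.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *
 *	if (uffd == -1)
 *		err(1, "userfaultfd");
 */
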
19813004ec9cSAndrea Arcangeli static int __init userfaultfd_init(void)
19823004ec9cSAndrea Arcangeli {
19833004ec9cSAndrea Arcangeli 	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
19843004ec9cSAndrea Arcangeli 						sizeof(struct userfaultfd_ctx),
19853004ec9cSAndrea Arcangeli 						0,
19863004ec9cSAndrea Arcangeli 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
19873004ec9cSAndrea Arcangeli 						init_once_userfaultfd_ctx);
19883004ec9cSAndrea Arcangeli 	return 0;
19893004ec9cSAndrea Arcangeli }
19903004ec9cSAndrea Arcangeli __initcall(userfaultfd_init);
1991