xref: /openbmc/linux/fs/userfaultfd.c (revision 2d5de004e009add27db76c5cdc9f1f7f7dc087e7)
120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
286039bd3SAndrea Arcangeli /*
386039bd3SAndrea Arcangeli  *  fs/userfaultfd.c
486039bd3SAndrea Arcangeli  *
586039bd3SAndrea Arcangeli  *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
686039bd3SAndrea Arcangeli  *  Copyright (C) 2008-2009 Red Hat, Inc.
786039bd3SAndrea Arcangeli  *  Copyright (C) 2015  Red Hat, Inc.
886039bd3SAndrea Arcangeli  *
986039bd3SAndrea Arcangeli  *  Some part derived from fs/eventfd.c (anon inode setup) and
1086039bd3SAndrea Arcangeli  *  mm/ksm.c (mm hashing).
1186039bd3SAndrea Arcangeli  */
1286039bd3SAndrea Arcangeli 
139cd75c3cSPavel Emelyanov #include <linux/list.h>
1486039bd3SAndrea Arcangeli #include <linux/hashtable.h>
15174cd4b1SIngo Molnar #include <linux/sched/signal.h>
166e84f315SIngo Molnar #include <linux/sched/mm.h>
1786039bd3SAndrea Arcangeli #include <linux/mm.h>
1817fca131SArnd Bergmann #include <linux/mm_inline.h>
196dfeaff9SPeter Xu #include <linux/mmu_notifier.h>
2086039bd3SAndrea Arcangeli #include <linux/poll.h>
2186039bd3SAndrea Arcangeli #include <linux/slab.h>
2286039bd3SAndrea Arcangeli #include <linux/seq_file.h>
2386039bd3SAndrea Arcangeli #include <linux/file.h>
2486039bd3SAndrea Arcangeli #include <linux/bug.h>
2586039bd3SAndrea Arcangeli #include <linux/anon_inodes.h>
2686039bd3SAndrea Arcangeli #include <linux/syscalls.h>
2786039bd3SAndrea Arcangeli #include <linux/userfaultfd_k.h>
2886039bd3SAndrea Arcangeli #include <linux/mempolicy.h>
2986039bd3SAndrea Arcangeli #include <linux/ioctl.h>
3086039bd3SAndrea Arcangeli #include <linux/security.h>
31cab350afSMike Kravetz #include <linux/hugetlb.h>
325c041f5dSPeter Xu #include <linux/swapops.h>
33*2d5de004SAxel Rasmussen #include <linux/miscdevice.h>
3486039bd3SAndrea Arcangeli 
35d0d4730aSLokesh Gidra int sysctl_unprivileged_userfaultfd __read_mostly;
36cefdca0aSPeter Xu 
373004ec9cSAndrea Arcangeli static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
383004ec9cSAndrea Arcangeli 
393004ec9cSAndrea Arcangeli /*
403004ec9cSAndrea Arcangeli  * Start with fault_pending_wqh and fault_wqh so they're more likely
413004ec9cSAndrea Arcangeli  * to be in the same cacheline.
42cbcfa130SEric Biggers  *
43cbcfa130SEric Biggers  * Locking order:
44cbcfa130SEric Biggers  *	fd_wqh.lock
45cbcfa130SEric Biggers  *		fault_pending_wqh.lock
46cbcfa130SEric Biggers  *			fault_wqh.lock
47cbcfa130SEric Biggers  *		event_wqh.lock
48cbcfa130SEric Biggers  *
49cbcfa130SEric Biggers  * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
50cbcfa130SEric Biggers  * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
51cbcfa130SEric Biggers  * also taken in IRQ context.
523004ec9cSAndrea Arcangeli  */
5386039bd3SAndrea Arcangeli struct userfaultfd_ctx {
5415b726efSAndrea Arcangeli 	/* waitqueue head for the pending (i.e. not read) userfaults */
5515b726efSAndrea Arcangeli 	wait_queue_head_t fault_pending_wqh;
5615b726efSAndrea Arcangeli 	/* waitqueue head for the userfaults */
5786039bd3SAndrea Arcangeli 	wait_queue_head_t fault_wqh;
5886039bd3SAndrea Arcangeli 	/* waitqueue head for the pseudo fd to wakeup poll/read */
5986039bd3SAndrea Arcangeli 	wait_queue_head_t fd_wqh;
609cd75c3cSPavel Emelyanov 	/* waitqueue head for events */
619cd75c3cSPavel Emelyanov 	wait_queue_head_t event_wqh;
622c5b7e1bSAndrea Arcangeli 	/* a refile sequence protected by fault_pending_wqh lock */
632ca97ac8SAhmed S. Darwish 	seqcount_spinlock_t refile_seq;
643004ec9cSAndrea Arcangeli 	/* pseudo fd refcounting */
65ca880420SEric Biggers 	refcount_t refcount;
6686039bd3SAndrea Arcangeli 	/* userfaultfd syscall flags */
6786039bd3SAndrea Arcangeli 	unsigned int flags;
689cd75c3cSPavel Emelyanov 	/* features requested from the userspace */
699cd75c3cSPavel Emelyanov 	unsigned int features;
7086039bd3SAndrea Arcangeli 	/* released */
7186039bd3SAndrea Arcangeli 	bool released;
72df2cc96eSMike Rapoport 	/* memory mappings are changing because of non-cooperative event */
73a759a909SNadav Amit 	atomic_t mmap_changing;
7486039bd3SAndrea Arcangeli 	/* mm with one or more vmas attached to this userfaultfd_ctx */
7586039bd3SAndrea Arcangeli 	struct mm_struct *mm;
7686039bd3SAndrea Arcangeli };
7786039bd3SAndrea Arcangeli 
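/*
 * For illustration, a minimal sketch of the locking order documented
 * above: a path that needs both waitqueue locks nests them like this,
 * with IRQs disabled at the outermost level because fd_wqh.lock is
 * also taken from IRQ context via aio_poll():
 *
 *	spin_lock_irq(&ctx->fd_wqh.lock);
 *	spin_lock(&ctx->fault_pending_wqh.lock);
 *	... refile or wake waiters ...
 *	spin_unlock(&ctx->fault_pending_wqh.lock);
 *	spin_unlock_irq(&ctx->fd_wqh.lock);
 */
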
78893e26e6SPavel Emelyanov struct userfaultfd_fork_ctx {
79893e26e6SPavel Emelyanov 	struct userfaultfd_ctx *orig;
80893e26e6SPavel Emelyanov 	struct userfaultfd_ctx *new;
81893e26e6SPavel Emelyanov 	struct list_head list;
82893e26e6SPavel Emelyanov };
83893e26e6SPavel Emelyanov 
84897ab3e0SMike Rapoport struct userfaultfd_unmap_ctx {
85897ab3e0SMike Rapoport 	struct userfaultfd_ctx *ctx;
86897ab3e0SMike Rapoport 	unsigned long start;
87897ab3e0SMike Rapoport 	unsigned long end;
88897ab3e0SMike Rapoport 	struct list_head list;
89897ab3e0SMike Rapoport };
90897ab3e0SMike Rapoport 
9186039bd3SAndrea Arcangeli struct userfaultfd_wait_queue {
92a9b85f94SAndrea Arcangeli 	struct uffd_msg msg;
93ac6424b9SIngo Molnar 	wait_queue_entry_t wq;
9486039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx;
9515a77c6fSAndrea Arcangeli 	bool waken;
9686039bd3SAndrea Arcangeli };
9786039bd3SAndrea Arcangeli 
9886039bd3SAndrea Arcangeli struct userfaultfd_wake_range {
9986039bd3SAndrea Arcangeli 	unsigned long start;
10086039bd3SAndrea Arcangeli 	unsigned long len;
10186039bd3SAndrea Arcangeli };
10286039bd3SAndrea Arcangeli 
10322e5fe2aSNadav Amit /* internal indication that UFFD_API ioctl was successfully executed */
10422e5fe2aSNadav Amit #define UFFD_FEATURE_INITIALIZED		(1u << 31)
10522e5fe2aSNadav Amit 
10622e5fe2aSNadav Amit static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
10722e5fe2aSNadav Amit {
10822e5fe2aSNadav Amit 	return ctx->features & UFFD_FEATURE_INITIALIZED;
10922e5fe2aSNadav Amit }
11022e5fe2aSNadav Amit 
111ac6424b9SIngo Molnar static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
11286039bd3SAndrea Arcangeli 				     int wake_flags, void *key)
11386039bd3SAndrea Arcangeli {
11486039bd3SAndrea Arcangeli 	struct userfaultfd_wake_range *range = key;
11586039bd3SAndrea Arcangeli 	int ret;
11686039bd3SAndrea Arcangeli 	struct userfaultfd_wait_queue *uwq;
11786039bd3SAndrea Arcangeli 	unsigned long start, len;
11886039bd3SAndrea Arcangeli 
11986039bd3SAndrea Arcangeli 	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
12086039bd3SAndrea Arcangeli 	ret = 0;
12186039bd3SAndrea Arcangeli 	/* len == 0 means wake all */
12286039bd3SAndrea Arcangeli 	start = range->start;
12386039bd3SAndrea Arcangeli 	len = range->len;
124a9b85f94SAndrea Arcangeli 	if (len && (start > uwq->msg.arg.pagefault.address ||
125a9b85f94SAndrea Arcangeli 		    start + len <= uwq->msg.arg.pagefault.address))
12686039bd3SAndrea Arcangeli 		goto out;
12715a77c6fSAndrea Arcangeli 	WRITE_ONCE(uwq->waken, true);
12815a77c6fSAndrea Arcangeli 	/*
129a9668cd6SPeter Zijlstra 	 * The Program-Order guarantees provided by the scheduler
130a9668cd6SPeter Zijlstra 	 * ensure uwq->waken is visible before the task is woken.
13115a77c6fSAndrea Arcangeli 	 */
13286039bd3SAndrea Arcangeli 	ret = wake_up_state(wq->private, mode);
133a9668cd6SPeter Zijlstra 	if (ret) {
13486039bd3SAndrea Arcangeli 		/*
13586039bd3SAndrea Arcangeli 		 * Wake only once, autoremove behavior.
13686039bd3SAndrea Arcangeli 		 *
137a9668cd6SPeter Zijlstra 		 * After the effect of list_del_init is visible to the other
138a9668cd6SPeter Zijlstra 		 * CPUs, the waitqueue may disappear from under us, see the
139a9668cd6SPeter Zijlstra 		 * !list_empty_careful() in handle_userfault().
140a9668cd6SPeter Zijlstra 		 *
141a9668cd6SPeter Zijlstra 		 * try_to_wake_up() has an implicit smp_mb(), and the
142a9668cd6SPeter Zijlstra 		 * wq->private is read before calling the extern function
143a9668cd6SPeter Zijlstra 		 * "wake_up_state" (which in turn calls try_to_wake_up).
14486039bd3SAndrea Arcangeli 		 */
1452055da97SIngo Molnar 		list_del_init(&wq->entry);
146a9668cd6SPeter Zijlstra 	}
14786039bd3SAndrea Arcangeli out:
14886039bd3SAndrea Arcangeli 	return ret;
14986039bd3SAndrea Arcangeli }
15086039bd3SAndrea Arcangeli 
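/*
 * For illustration, the range check above is what lets userspace wake
 * only the faults inside [start, start + len). A minimal sketch of the
 * corresponding UFFDIO_WAKE call (assuming a page-aligned addr):
 *
 *	struct uffdio_range range = {
 *		.start = (unsigned long)addr,
 *		.len = page_size,
 *	};
 *	if (ioctl(uffd, UFFDIO_WAKE, &range))
 *		perror("UFFDIO_WAKE");
 */
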
15186039bd3SAndrea Arcangeli /**
15286039bd3SAndrea Arcangeli  * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
15386039bd3SAndrea Arcangeli  * context.
15486039bd3SAndrea Arcangeli  * @ctx: [in] Pointer to the userfaultfd context.
15586039bd3SAndrea Arcangeli  */
15686039bd3SAndrea Arcangeli static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
15786039bd3SAndrea Arcangeli {
158ca880420SEric Biggers 	refcount_inc(&ctx->refcount);
15986039bd3SAndrea Arcangeli }
16086039bd3SAndrea Arcangeli 
16186039bd3SAndrea Arcangeli /**
16286039bd3SAndrea Arcangeli  * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
16386039bd3SAndrea Arcangeli  * context.
16486039bd3SAndrea Arcangeli  * @ctx: [in] Pointer to userfaultfd context.
16586039bd3SAndrea Arcangeli  *
16686039bd3SAndrea Arcangeli  * The userfaultfd context reference must have been previously acquired either
16786039bd3SAndrea Arcangeli  * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
16886039bd3SAndrea Arcangeli  */
16986039bd3SAndrea Arcangeli static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
17086039bd3SAndrea Arcangeli {
171ca880420SEric Biggers 	if (refcount_dec_and_test(&ctx->refcount)) {
17286039bd3SAndrea Arcangeli 		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
17386039bd3SAndrea Arcangeli 		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
17486039bd3SAndrea Arcangeli 		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
17586039bd3SAndrea Arcangeli 		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
1769cd75c3cSPavel Emelyanov 		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
1779cd75c3cSPavel Emelyanov 		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
17886039bd3SAndrea Arcangeli 		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
17986039bd3SAndrea Arcangeli 		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
180d2005e3fSOleg Nesterov 		mmdrop(ctx->mm);
1813004ec9cSAndrea Arcangeli 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
18286039bd3SAndrea Arcangeli 	}
18386039bd3SAndrea Arcangeli }
18486039bd3SAndrea Arcangeli 
185a9b85f94SAndrea Arcangeli static inline void msg_init(struct uffd_msg *msg)
186a9b85f94SAndrea Arcangeli {
187a9b85f94SAndrea Arcangeli 	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
188a9b85f94SAndrea Arcangeli 	/*
189a9b85f94SAndrea Arcangeli 	 * Must use memset to zero out the padding, or kernel data is
190a9b85f94SAndrea Arcangeli 	 * leaked to userland.
191a9b85f94SAndrea Arcangeli 	 */
192a9b85f94SAndrea Arcangeli 	memset(msg, 0, sizeof(struct uffd_msg));
193a9b85f94SAndrea Arcangeli }
194a9b85f94SAndrea Arcangeli 
195a9b85f94SAndrea Arcangeli static inline struct uffd_msg userfault_msg(unsigned long address,
196d172b1a3SNadav Amit 					    unsigned long real_address,
19786039bd3SAndrea Arcangeli 					    unsigned int flags,
1989d4ac934SAlexey Perevalov 					    unsigned long reason,
1999d4ac934SAlexey Perevalov 					    unsigned int features)
20086039bd3SAndrea Arcangeli {
201a9b85f94SAndrea Arcangeli 	struct uffd_msg msg;
202d172b1a3SNadav Amit 
203a9b85f94SAndrea Arcangeli 	msg_init(&msg);
204a9b85f94SAndrea Arcangeli 	msg.event = UFFD_EVENT_PAGEFAULT;
205824ddc60SNadav Amit 
206d172b1a3SNadav Amit 	msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
207d172b1a3SNadav Amit 				    real_address : address;
208d172b1a3SNadav Amit 
20986039bd3SAndrea Arcangeli 	/*
2107677f7fdSAxel Rasmussen 	 * These flags indicate why the userfault occurred:
2117677f7fdSAxel Rasmussen 	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
2127677f7fdSAxel Rasmussen 	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
2137677f7fdSAxel Rasmussen 	 * - Neither of these flags being set indicates a MISSING fault.
2147677f7fdSAxel Rasmussen 	 *
2157677f7fdSAxel Rasmussen 	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
2167677f7fdSAxel Rasmussen 	 * fault. Otherwise, it was a read fault.
21786039bd3SAndrea Arcangeli 	 */
2187677f7fdSAxel Rasmussen 	if (flags & FAULT_FLAG_WRITE)
219a9b85f94SAndrea Arcangeli 		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
22086039bd3SAndrea Arcangeli 	if (reason & VM_UFFD_WP)
221a9b85f94SAndrea Arcangeli 		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
2227677f7fdSAxel Rasmussen 	if (reason & VM_UFFD_MINOR)
2237677f7fdSAxel Rasmussen 		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
2249d4ac934SAlexey Perevalov 	if (features & UFFD_FEATURE_THREAD_ID)
225a36985d3SAndrea Arcangeli 		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
226a9b85f94SAndrea Arcangeli 	return msg;
22786039bd3SAndrea Arcangeli }
22886039bd3SAndrea Arcangeli 
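/*
 * For illustration, a minimal userspace sketch of decoding the message
 * built above, checking the flag bits the way the comment describes:
 *
 *	struct uffd_msg msg;
 *	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *		return;			// e.g. EAGAIN with O_NONBLOCK
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		unsigned long addr = msg.arg.pagefault.address;
 *		bool wp = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP;
 *		bool minor = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR;
 *		// neither WP nor MINOR set means a MISSING fault at addr
 *	}
 */
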
229369cd212SMike Kravetz #ifdef CONFIG_HUGETLB_PAGE
230369cd212SMike Kravetz /*
231369cd212SMike Kravetz  * Same functionality as userfaultfd_must_wait below with modifications for
232369cd212SMike Kravetz  * hugepmd ranges.
233369cd212SMike Kravetz  */
234369cd212SMike Kravetz static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
2357868a208SPunit Agrawal 					 struct vm_area_struct *vma,
236369cd212SMike Kravetz 					 unsigned long address,
237369cd212SMike Kravetz 					 unsigned long flags,
238369cd212SMike Kravetz 					 unsigned long reason)
239369cd212SMike Kravetz {
240369cd212SMike Kravetz 	struct mm_struct *mm = ctx->mm;
2411e2c0436SJanosch Frank 	pte_t *ptep, pte;
242369cd212SMike Kravetz 	bool ret = true;
243369cd212SMike Kravetz 
24442fc5414SMichel Lespinasse 	mmap_assert_locked(mm);
245369cd212SMike Kravetz 
2461e2c0436SJanosch Frank 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
2471e2c0436SJanosch Frank 
2481e2c0436SJanosch Frank 	if (!ptep)
249369cd212SMike Kravetz 		goto out;
250369cd212SMike Kravetz 
251369cd212SMike Kravetz 	ret = false;
2521e2c0436SJanosch Frank 	pte = huge_ptep_get(ptep);
253369cd212SMike Kravetz 
254369cd212SMike Kravetz 	/*
255369cd212SMike Kravetz 	 * Lockless access: we're in a wait_event so it's ok if it
2565c041f5dSPeter Xu 	 * changes under us.  PTE markers should be handled the same as none
2575c041f5dSPeter Xu 	 * ptes here.
258369cd212SMike Kravetz 	 */
2595c041f5dSPeter Xu 	if (huge_pte_none_mostly(pte))
260369cd212SMike Kravetz 		ret = true;
2611e2c0436SJanosch Frank 	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
262369cd212SMike Kravetz 		ret = true;
263369cd212SMike Kravetz out:
264369cd212SMike Kravetz 	return ret;
265369cd212SMike Kravetz }
266369cd212SMike Kravetz #else
267369cd212SMike Kravetz static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
2687868a208SPunit Agrawal 					 struct vm_area_struct *vma,
269369cd212SMike Kravetz 					 unsigned long address,
270369cd212SMike Kravetz 					 unsigned long flags,
271369cd212SMike Kravetz 					 unsigned long reason)
272369cd212SMike Kravetz {
273369cd212SMike Kravetz 	return false;	/* should never get here */
274369cd212SMike Kravetz }
275369cd212SMike Kravetz #endif /* CONFIG_HUGETLB_PAGE */
276369cd212SMike Kravetz 
27786039bd3SAndrea Arcangeli /*
2788d2afd96SAndrea Arcangeli  * Verify the pagetables are still not ok after having registered into
2798d2afd96SAndrea Arcangeli  * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
2808d2afd96SAndrea Arcangeli  * userfault that has already been resolved, if userfaultfd_read and
2818d2afd96SAndrea Arcangeli  * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
2828d2afd96SAndrea Arcangeli  * threads.
2838d2afd96SAndrea Arcangeli  */
2848d2afd96SAndrea Arcangeli static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
2858d2afd96SAndrea Arcangeli 					 unsigned long address,
2868d2afd96SAndrea Arcangeli 					 unsigned long flags,
2878d2afd96SAndrea Arcangeli 					 unsigned long reason)
2888d2afd96SAndrea Arcangeli {
2898d2afd96SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
2908d2afd96SAndrea Arcangeli 	pgd_t *pgd;
291c2febafcSKirill A. Shutemov 	p4d_t *p4d;
2928d2afd96SAndrea Arcangeli 	pud_t *pud;
2938d2afd96SAndrea Arcangeli 	pmd_t *pmd, _pmd;
2948d2afd96SAndrea Arcangeli 	pte_t *pte;
2958d2afd96SAndrea Arcangeli 	bool ret = true;
2968d2afd96SAndrea Arcangeli 
29742fc5414SMichel Lespinasse 	mmap_assert_locked(mm);
2988d2afd96SAndrea Arcangeli 
2998d2afd96SAndrea Arcangeli 	pgd = pgd_offset(mm, address);
3008d2afd96SAndrea Arcangeli 	if (!pgd_present(*pgd))
3018d2afd96SAndrea Arcangeli 		goto out;
302c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
303c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
304c2febafcSKirill A. Shutemov 		goto out;
305c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, address);
3068d2afd96SAndrea Arcangeli 	if (!pud_present(*pud))
3078d2afd96SAndrea Arcangeli 		goto out;
3088d2afd96SAndrea Arcangeli 	pmd = pmd_offset(pud, address);
3098d2afd96SAndrea Arcangeli 	/*
3108d2afd96SAndrea Arcangeli 	 * READ_ONCE must function as a barrier with narrower scope
3118d2afd96SAndrea Arcangeli 	 * and it must be equivalent to:
3128d2afd96SAndrea Arcangeli 	 *	_pmd = *pmd; barrier();
3138d2afd96SAndrea Arcangeli 	 *
3148d2afd96SAndrea Arcangeli 	 * This is to deal with the instability (as in
3158d2afd96SAndrea Arcangeli 	 * pmd_trans_unstable) of the pmd.
3168d2afd96SAndrea Arcangeli 	 */
3178d2afd96SAndrea Arcangeli 	_pmd = READ_ONCE(*pmd);
318a365ac09SHuang Ying 	if (pmd_none(_pmd))
3198d2afd96SAndrea Arcangeli 		goto out;
3208d2afd96SAndrea Arcangeli 
3218d2afd96SAndrea Arcangeli 	ret = false;
322a365ac09SHuang Ying 	if (!pmd_present(_pmd))
323a365ac09SHuang Ying 		goto out;
324a365ac09SHuang Ying 
32563b2d417SAndrea Arcangeli 	if (pmd_trans_huge(_pmd)) {
32663b2d417SAndrea Arcangeli 		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
32763b2d417SAndrea Arcangeli 			ret = true;
3288d2afd96SAndrea Arcangeli 		goto out;
32963b2d417SAndrea Arcangeli 	}
3308d2afd96SAndrea Arcangeli 
3318d2afd96SAndrea Arcangeli 	/*
3328d2afd96SAndrea Arcangeli 	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
3338d2afd96SAndrea Arcangeli 	 * and use the standard pte_offset_map() instead of parsing _pmd.
3348d2afd96SAndrea Arcangeli 	 */
3358d2afd96SAndrea Arcangeli 	pte = pte_offset_map(pmd, address);
3368d2afd96SAndrea Arcangeli 	/*
3378d2afd96SAndrea Arcangeli 	 * Lockless access: we're in a wait_event so it's ok if it
3385c041f5dSPeter Xu 	 * changes under us.  PTE markers should be handled the same as none
3395c041f5dSPeter Xu 	 * ptes here.
3408d2afd96SAndrea Arcangeli 	 */
3415c041f5dSPeter Xu 	if (pte_none_mostly(*pte))
3428d2afd96SAndrea Arcangeli 		ret = true;
34363b2d417SAndrea Arcangeli 	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
34463b2d417SAndrea Arcangeli 		ret = true;
3458d2afd96SAndrea Arcangeli 	pte_unmap(pte);
3468d2afd96SAndrea Arcangeli 
3478d2afd96SAndrea Arcangeli out:
3488d2afd96SAndrea Arcangeli 	return ret;
3498d2afd96SAndrea Arcangeli }
3508d2afd96SAndrea Arcangeli 
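/*
 * For illustration, the two must_wait helpers above are the re-check
 * half of the classic "queue first, then test the condition" pattern
 * used by handle_userfault() below, roughly:
 *
 *	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
 *	set_current_state(blocking_state);
 *	...
 *	must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
 *	if (must_wait)
 *		schedule();
 *
 * If a concurrent UFFDIO_COPY already resolved the fault, the re-check
 * returns false and the task never sleeps, so no wakeup can be lost.
 */
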
3512f064a59SPeter Zijlstra static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
3523e69ad08SPeter Xu {
3533e69ad08SPeter Xu 	if (flags & FAULT_FLAG_INTERRUPTIBLE)
3543e69ad08SPeter Xu 		return TASK_INTERRUPTIBLE;
3553e69ad08SPeter Xu 
3563e69ad08SPeter Xu 	if (flags & FAULT_FLAG_KILLABLE)
3573e69ad08SPeter Xu 		return TASK_KILLABLE;
3583e69ad08SPeter Xu 
3593e69ad08SPeter Xu 	return TASK_UNINTERRUPTIBLE;
3603e69ad08SPeter Xu }
3613e69ad08SPeter Xu 
3628d2afd96SAndrea Arcangeli /*
36386039bd3SAndrea Arcangeli  * The locking rules involved in returning VM_FAULT_RETRY depending on
36486039bd3SAndrea Arcangeli  * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
36586039bd3SAndrea Arcangeli  * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
36686039bd3SAndrea Arcangeli  * recommendation in __lock_page_or_retry is not an understatement.
36786039bd3SAndrea Arcangeli  *
368c1e8d7c6SMichel Lespinasse  * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
36986039bd3SAndrea Arcangeli  * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
37086039bd3SAndrea Arcangeli  * not set.
37186039bd3SAndrea Arcangeli  *
37286039bd3SAndrea Arcangeli  * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
37386039bd3SAndrea Arcangeli  * set, VM_FAULT_RETRY can still be returned if and only if there are
374c1e8d7c6SMichel Lespinasse  * fatal_signal_pending()s, and the mmap_lock must be released before
37586039bd3SAndrea Arcangeli  * returning it.
37686039bd3SAndrea Arcangeli  */
3772b740303SSouptick Joarder vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
37886039bd3SAndrea Arcangeli {
37982b0f8c3SJan Kara 	struct mm_struct *mm = vmf->vma->vm_mm;
38086039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx;
38186039bd3SAndrea Arcangeli 	struct userfaultfd_wait_queue uwq;
3822b740303SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
3833e69ad08SPeter Xu 	bool must_wait;
3842f064a59SPeter Zijlstra 	unsigned int blocking_state;
38586039bd3SAndrea Arcangeli 
38664c2b203SAndrea Arcangeli 	/*
38764c2b203SAndrea Arcangeli 	 * We don't do userfault handling for the final child pid update.
38864c2b203SAndrea Arcangeli 	 *
38964c2b203SAndrea Arcangeli 	 * We also don't do userfault handling during
39064c2b203SAndrea Arcangeli 	 * coredumping. hugetlbfs has the special
39164c2b203SAndrea Arcangeli 	 * follow_hugetlb_page() to skip missing pages in the
39264c2b203SAndrea Arcangeli 	 * FOLL_DUMP case; anon memory also checks for FOLL_DUMP with
39364c2b203SAndrea Arcangeli 	 * the no_page_table() helper in follow_page_mask(), but the
39464c2b203SAndrea Arcangeli 	 * shmem_vm_ops->fault method is invoked even during
395c1e8d7c6SMichel Lespinasse 	 * coredumping without mmap_lock and it ends up here.
39664c2b203SAndrea Arcangeli 	 */
39764c2b203SAndrea Arcangeli 	if (current->flags & (PF_EXITING|PF_DUMPCORE))
39864c2b203SAndrea Arcangeli 		goto out;
39964c2b203SAndrea Arcangeli 
40064c2b203SAndrea Arcangeli 	/*
401c1e8d7c6SMichel Lespinasse 	 * Coredumping runs without mmap_lock so we can only check that
402c1e8d7c6SMichel Lespinasse 	 * the mmap_lock is held, if PF_DUMPCORE was not set.
40364c2b203SAndrea Arcangeli 	 */
40442fc5414SMichel Lespinasse 	mmap_assert_locked(mm);
40564c2b203SAndrea Arcangeli 
40682b0f8c3SJan Kara 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
40786039bd3SAndrea Arcangeli 	if (!ctx)
408ba85c702SAndrea Arcangeli 		goto out;
40986039bd3SAndrea Arcangeli 
41086039bd3SAndrea Arcangeli 	BUG_ON(ctx->mm != mm);
41186039bd3SAndrea Arcangeli 
4127677f7fdSAxel Rasmussen 	/* Any unrecognized flag is a bug. */
4137677f7fdSAxel Rasmussen 	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
4147677f7fdSAxel Rasmussen 	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
4157677f7fdSAxel Rasmussen 	VM_BUG_ON(!reason || (reason & (reason - 1)));
41686039bd3SAndrea Arcangeli 
4172d6d6f5aSPrakash Sangappa 	if (ctx->features & UFFD_FEATURE_SIGBUS)
4182d6d6f5aSPrakash Sangappa 		goto out;
419*2d5de004SAxel Rasmussen 	if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
42037cd0575SLokesh Gidra 		goto out;
4212d6d6f5aSPrakash Sangappa 
42286039bd3SAndrea Arcangeli 	/*
42386039bd3SAndrea Arcangeli 	 * If it's already released don't get it. This avoids looping
42486039bd3SAndrea Arcangeli 	 * in __get_user_pages if userfaultfd_release waits on the
425c1e8d7c6SMichel Lespinasse 	 * caller of handle_userfault to release the mmap_lock.
42686039bd3SAndrea Arcangeli 	 */
4276aa7de05SMark Rutland 	if (unlikely(READ_ONCE(ctx->released))) {
428656710a6SAndrea Arcangeli 		/*
429656710a6SAndrea Arcangeli 		 * Don't return VM_FAULT_SIGBUS in this case, so a non
430656710a6SAndrea Arcangeli 		 * cooperative manager can close the uffd after the
431656710a6SAndrea Arcangeli 		 * last UFFDIO_COPY, without the risk of triggering an
432656710a6SAndrea Arcangeli 		 * involuntary SIGBUS if the process was starting the
433656710a6SAndrea Arcangeli 		 * userfaultfd while the userfaultfd was still armed
434656710a6SAndrea Arcangeli 		 * (but after the last UFFDIO_COPY). If the uffd
435656710a6SAndrea Arcangeli 		 * wasn't already closed when the userfault reached
436656710a6SAndrea Arcangeli 		 * this point, that would normally be solved by
437656710a6SAndrea Arcangeli 		 * userfaultfd_must_wait returning 'false'.
438656710a6SAndrea Arcangeli 		 *
439656710a6SAndrea Arcangeli 		 * If we were to return VM_FAULT_SIGBUS here, the non
440656710a6SAndrea Arcangeli 		 * cooperative manager would be instead forced to
441656710a6SAndrea Arcangeli 		 * always call UFFDIO_UNREGISTER before it can safely
442656710a6SAndrea Arcangeli 		 * close the uffd.
443656710a6SAndrea Arcangeli 		 */
444656710a6SAndrea Arcangeli 		ret = VM_FAULT_NOPAGE;
445ba85c702SAndrea Arcangeli 		goto out;
446656710a6SAndrea Arcangeli 	}
44786039bd3SAndrea Arcangeli 
44886039bd3SAndrea Arcangeli 	/*
44986039bd3SAndrea Arcangeli 	 * Check that we can return VM_FAULT_RETRY.
45086039bd3SAndrea Arcangeli 	 *
45186039bd3SAndrea Arcangeli 	 * NOTE: it should become possible to return VM_FAULT_RETRY
45286039bd3SAndrea Arcangeli 	 * even if FAULT_FLAG_TRIED is set without leading to gup()
45386039bd3SAndrea Arcangeli 	 * -EBUSY failures, if the userfaultfd is to be extended for
45486039bd3SAndrea Arcangeli 	 * VM_UFFD_WP tracking and we intend to arm the userfault
45586039bd3SAndrea Arcangeli 	 * without first stopping userland access to the memory. For
45686039bd3SAndrea Arcangeli 	 * VM_UFFD_MISSING userfaults this is enough for now.
45786039bd3SAndrea Arcangeli 	 */
45882b0f8c3SJan Kara 	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
45986039bd3SAndrea Arcangeli 		/*
46086039bd3SAndrea Arcangeli 		 * Validate the invariant that nowait must allow retry
46186039bd3SAndrea Arcangeli 		 * to be sure not to return SIGBUS erroneously on
46286039bd3SAndrea Arcangeli 		 * nowait invocations.
46386039bd3SAndrea Arcangeli 		 */
46482b0f8c3SJan Kara 		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
46586039bd3SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
46686039bd3SAndrea Arcangeli 		if (printk_ratelimit()) {
46786039bd3SAndrea Arcangeli 			printk(KERN_WARNING
46882b0f8c3SJan Kara 			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
46982b0f8c3SJan Kara 			       vmf->flags);
47086039bd3SAndrea Arcangeli 			dump_stack();
47186039bd3SAndrea Arcangeli 		}
47286039bd3SAndrea Arcangeli #endif
473ba85c702SAndrea Arcangeli 		goto out;
47486039bd3SAndrea Arcangeli 	}
47586039bd3SAndrea Arcangeli 
47686039bd3SAndrea Arcangeli 	/*
47786039bd3SAndrea Arcangeli 	 * Handle nowait, not much to do other than tell it to retry
47886039bd3SAndrea Arcangeli 	 * and wait.
47986039bd3SAndrea Arcangeli 	 */
480ba85c702SAndrea Arcangeli 	ret = VM_FAULT_RETRY;
48182b0f8c3SJan Kara 	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
482ba85c702SAndrea Arcangeli 		goto out;
48386039bd3SAndrea Arcangeli 
484c1e8d7c6SMichel Lespinasse 	/* take the reference before dropping the mmap_lock */
48586039bd3SAndrea Arcangeli 	userfaultfd_ctx_get(ctx);
48686039bd3SAndrea Arcangeli 
48786039bd3SAndrea Arcangeli 	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
48886039bd3SAndrea Arcangeli 	uwq.wq.private = current;
489d172b1a3SNadav Amit 	uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
490d172b1a3SNadav Amit 				reason, ctx->features);
49186039bd3SAndrea Arcangeli 	uwq.ctx = ctx;
49215a77c6fSAndrea Arcangeli 	uwq.waken = false;
49386039bd3SAndrea Arcangeli 
4943e69ad08SPeter Xu 	blocking_state = userfaultfd_get_blocking_state(vmf->flags);
495dfa37dc3SAndrea Arcangeli 
496cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
49786039bd3SAndrea Arcangeli 	/*
49886039bd3SAndrea Arcangeli 	 * After the __add_wait_queue the uwq is visible to userland
49986039bd3SAndrea Arcangeli 	 * through poll/read().
50086039bd3SAndrea Arcangeli 	 */
50115b726efSAndrea Arcangeli 	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
50215b726efSAndrea Arcangeli 	/*
50315b726efSAndrea Arcangeli 	 * The smp_mb() after __set_current_state prevents the reads
50415b726efSAndrea Arcangeli 	 * following the spin_unlock from happening before the list_add
50515b726efSAndrea Arcangeli 	 * in __add_wait_queue.
50615b726efSAndrea Arcangeli 	 */
50715a77c6fSAndrea Arcangeli 	set_current_state(blocking_state);
508cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
50986039bd3SAndrea Arcangeli 
510369cd212SMike Kravetz 	if (!is_vm_hugetlb_page(vmf->vma))
51182b0f8c3SJan Kara 		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
51282b0f8c3SJan Kara 						  reason);
513369cd212SMike Kravetz 	else
5147868a208SPunit Agrawal 		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
5157868a208SPunit Agrawal 						       vmf->address,
516369cd212SMike Kravetz 						       vmf->flags, reason);
517d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
5188d2afd96SAndrea Arcangeli 
519f9bf3522SLinus Torvalds 	if (likely(must_wait && !READ_ONCE(ctx->released))) {
520a9a08845SLinus Torvalds 		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
52186039bd3SAndrea Arcangeli 		schedule();
52286039bd3SAndrea Arcangeli 	}
523ba85c702SAndrea Arcangeli 
52486039bd3SAndrea Arcangeli 	__set_current_state(TASK_RUNNING);
52515b726efSAndrea Arcangeli 
52615b726efSAndrea Arcangeli 	/*
52715b726efSAndrea Arcangeli 	 * Here we race with the list_del; list_add in
52815b726efSAndrea Arcangeli 	 * userfaultfd_ctx_read(); however, because we never run
52915b726efSAndrea Arcangeli 	 * list_del_init() to refile across the two lists, the prev
53015b726efSAndrea Arcangeli 	 * and next pointers will never point to self. list_add also
53115b726efSAndrea Arcangeli 	 * never lets either of the two pointers point to
53215b726efSAndrea Arcangeli 	 * self. So list_empty_careful won't risk seeing both pointers
53315b726efSAndrea Arcangeli 	 * pointing to self at any time during the list refile. The
53415b726efSAndrea Arcangeli 	 * only case where list_del_init() is called is the full
53515b726efSAndrea Arcangeli 	 * removal in the wake function, and there we don't re-list_add,
53615b726efSAndrea Arcangeli 	 * so it's fine not to block on the spinlock. The uwq on this
53715b726efSAndrea Arcangeli 	 * kernel stack can be released after the list_del_init.
53815b726efSAndrea Arcangeli 	 */
5392055da97SIngo Molnar 	if (!list_empty_careful(&uwq.wq.entry)) {
540cbcfa130SEric Biggers 		spin_lock_irq(&ctx->fault_pending_wqh.lock);
54115b726efSAndrea Arcangeli 		/*
54215b726efSAndrea Arcangeli 		 * No need for list_del_init(); the uwq on the stack
54315b726efSAndrea Arcangeli 		 * will be freed shortly anyway.
54415b726efSAndrea Arcangeli 		 */
5452055da97SIngo Molnar 		list_del(&uwq.wq.entry);
546cbcfa130SEric Biggers 		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
547ba85c702SAndrea Arcangeli 	}
54886039bd3SAndrea Arcangeli 
54986039bd3SAndrea Arcangeli 	/*
55086039bd3SAndrea Arcangeli 	 * ctx may go away after this if the userfault pseudo fd is
55186039bd3SAndrea Arcangeli 	 * already released.
55286039bd3SAndrea Arcangeli 	 */
55386039bd3SAndrea Arcangeli 	userfaultfd_ctx_put(ctx);
55486039bd3SAndrea Arcangeli 
555ba85c702SAndrea Arcangeli out:
556ba85c702SAndrea Arcangeli 	return ret;
55786039bd3SAndrea Arcangeli }
55886039bd3SAndrea Arcangeli 
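/*
 * For illustration, handle_userfault() is invoked from the fault
 * handlers in mm/. A missing-page path looks roughly like this (see
 * do_anonymous_page() for a real caller):
 *
 *	if (userfaultfd_missing(vma))
 *		return handle_userfault(vmf, VM_UFFD_MISSING);
 */
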
5598c9e7bb7SAndrea Arcangeli static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
5609cd75c3cSPavel Emelyanov 					      struct userfaultfd_wait_queue *ewq)
5619cd75c3cSPavel Emelyanov {
5620cbb4b4fSAndrea Arcangeli 	struct userfaultfd_ctx *release_new_ctx;
5630cbb4b4fSAndrea Arcangeli 
5649a69a829SAndrea Arcangeli 	if (WARN_ON_ONCE(current->flags & PF_EXITING))
5659a69a829SAndrea Arcangeli 		goto out;
5669a69a829SAndrea Arcangeli 
5679cd75c3cSPavel Emelyanov 	ewq->ctx = ctx;
5689cd75c3cSPavel Emelyanov 	init_waitqueue_entry(&ewq->wq, current);
5690cbb4b4fSAndrea Arcangeli 	release_new_ctx = NULL;
5709cd75c3cSPavel Emelyanov 
571cbcfa130SEric Biggers 	spin_lock_irq(&ctx->event_wqh.lock);
5729cd75c3cSPavel Emelyanov 	/*
5739cd75c3cSPavel Emelyanov 	 * After the __add_wait_queue the uwq is visible to userland
5749cd75c3cSPavel Emelyanov 	 * through poll/read().
5759cd75c3cSPavel Emelyanov 	 */
5769cd75c3cSPavel Emelyanov 	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
5779cd75c3cSPavel Emelyanov 	for (;;) {
5789cd75c3cSPavel Emelyanov 		set_current_state(TASK_KILLABLE);
5799cd75c3cSPavel Emelyanov 		if (ewq->msg.event == 0)
5809cd75c3cSPavel Emelyanov 			break;
5816aa7de05SMark Rutland 		if (READ_ONCE(ctx->released) ||
5829cd75c3cSPavel Emelyanov 		    fatal_signal_pending(current)) {
583384632e6SAndrea Arcangeli 			/*
584384632e6SAndrea Arcangeli 			 * &ewq->wq may be queued in fork_event, but
585384632e6SAndrea Arcangeli 			 * __remove_wait_queue ignores the head
586384632e6SAndrea Arcangeli 			 * parameter. It would be a problem if it
587384632e6SAndrea Arcangeli 			 * didn't.
588384632e6SAndrea Arcangeli 			 */
5899cd75c3cSPavel Emelyanov 			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
5907eb76d45SMike Rapoport 			if (ewq->msg.event == UFFD_EVENT_FORK) {
5917eb76d45SMike Rapoport 				struct userfaultfd_ctx *new;
5927eb76d45SMike Rapoport 
5937eb76d45SMike Rapoport 				new = (struct userfaultfd_ctx *)
5947eb76d45SMike Rapoport 					(unsigned long)
5957eb76d45SMike Rapoport 					ewq->msg.arg.reserved.reserved1;
5960cbb4b4fSAndrea Arcangeli 				release_new_ctx = new;
5977eb76d45SMike Rapoport 			}
5989cd75c3cSPavel Emelyanov 			break;
5999cd75c3cSPavel Emelyanov 		}
6009cd75c3cSPavel Emelyanov 
601cbcfa130SEric Biggers 		spin_unlock_irq(&ctx->event_wqh.lock);
6029cd75c3cSPavel Emelyanov 
603a9a08845SLinus Torvalds 		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
6049cd75c3cSPavel Emelyanov 		schedule();
6059cd75c3cSPavel Emelyanov 
606cbcfa130SEric Biggers 		spin_lock_irq(&ctx->event_wqh.lock);
6079cd75c3cSPavel Emelyanov 	}
6089cd75c3cSPavel Emelyanov 	__set_current_state(TASK_RUNNING);
609cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->event_wqh.lock);
6109cd75c3cSPavel Emelyanov 
6110cbb4b4fSAndrea Arcangeli 	if (release_new_ctx) {
6120cbb4b4fSAndrea Arcangeli 		struct vm_area_struct *vma;
6130cbb4b4fSAndrea Arcangeli 		struct mm_struct *mm = release_new_ctx->mm;
6140cbb4b4fSAndrea Arcangeli 
6150cbb4b4fSAndrea Arcangeli 		/* the various vma->vm_userfaultfd_ctx still point to it */
616d8ed45c5SMichel Lespinasse 		mmap_write_lock(mm);
6170cbb4b4fSAndrea Arcangeli 		for (vma = mm->mmap; vma; vma = vma->vm_next)
61831e810aaSMike Rapoport 			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
6190cbb4b4fSAndrea Arcangeli 				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
6207677f7fdSAxel Rasmussen 				vma->vm_flags &= ~__VM_UFFD_FLAGS;
62131e810aaSMike Rapoport 			}
622d8ed45c5SMichel Lespinasse 		mmap_write_unlock(mm);
6230cbb4b4fSAndrea Arcangeli 
6240cbb4b4fSAndrea Arcangeli 		userfaultfd_ctx_put(release_new_ctx);
6250cbb4b4fSAndrea Arcangeli 	}
6260cbb4b4fSAndrea Arcangeli 
6279cd75c3cSPavel Emelyanov 	/*
6289cd75c3cSPavel Emelyanov 	 * ctx may go away after this if the userfault pseudo fd is
6299cd75c3cSPavel Emelyanov 	 * already released.
6309cd75c3cSPavel Emelyanov 	 */
6319a69a829SAndrea Arcangeli out:
632a759a909SNadav Amit 	atomic_dec(&ctx->mmap_changing);
633a759a909SNadav Amit 	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
6349cd75c3cSPavel Emelyanov 	userfaultfd_ctx_put(ctx);
6359cd75c3cSPavel Emelyanov }
6369cd75c3cSPavel Emelyanov 
6379cd75c3cSPavel Emelyanov static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
6389cd75c3cSPavel Emelyanov 				       struct userfaultfd_wait_queue *ewq)
6399cd75c3cSPavel Emelyanov {
6409cd75c3cSPavel Emelyanov 	ewq->msg.event = 0;
6419cd75c3cSPavel Emelyanov 	wake_up_locked(&ctx->event_wqh);
6429cd75c3cSPavel Emelyanov 	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
6439cd75c3cSPavel Emelyanov }
6449cd75c3cSPavel Emelyanov 
645893e26e6SPavel Emelyanov int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
646893e26e6SPavel Emelyanov {
647893e26e6SPavel Emelyanov 	struct userfaultfd_ctx *ctx = NULL, *octx;
648893e26e6SPavel Emelyanov 	struct userfaultfd_fork_ctx *fctx;
649893e26e6SPavel Emelyanov 
650893e26e6SPavel Emelyanov 	octx = vma->vm_userfaultfd_ctx.ctx;
651893e26e6SPavel Emelyanov 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
652893e26e6SPavel Emelyanov 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
6537677f7fdSAxel Rasmussen 		vma->vm_flags &= ~__VM_UFFD_FLAGS;
654893e26e6SPavel Emelyanov 		return 0;
655893e26e6SPavel Emelyanov 	}
656893e26e6SPavel Emelyanov 
657893e26e6SPavel Emelyanov 	list_for_each_entry(fctx, fcs, list)
658893e26e6SPavel Emelyanov 		if (fctx->orig == octx) {
659893e26e6SPavel Emelyanov 			ctx = fctx->new;
660893e26e6SPavel Emelyanov 			break;
661893e26e6SPavel Emelyanov 		}
662893e26e6SPavel Emelyanov 
663893e26e6SPavel Emelyanov 	if (!ctx) {
664893e26e6SPavel Emelyanov 		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
665893e26e6SPavel Emelyanov 		if (!fctx)
666893e26e6SPavel Emelyanov 			return -ENOMEM;
667893e26e6SPavel Emelyanov 
668893e26e6SPavel Emelyanov 		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
669893e26e6SPavel Emelyanov 		if (!ctx) {
670893e26e6SPavel Emelyanov 			kfree(fctx);
671893e26e6SPavel Emelyanov 			return -ENOMEM;
672893e26e6SPavel Emelyanov 		}
673893e26e6SPavel Emelyanov 
674ca880420SEric Biggers 		refcount_set(&ctx->refcount, 1);
675893e26e6SPavel Emelyanov 		ctx->flags = octx->flags;
676893e26e6SPavel Emelyanov 		ctx->features = octx->features;
677893e26e6SPavel Emelyanov 		ctx->released = false;
678a759a909SNadav Amit 		atomic_set(&ctx->mmap_changing, 0);
679893e26e6SPavel Emelyanov 		ctx->mm = vma->vm_mm;
68000bb31faSMike Rapoport 		mmgrab(ctx->mm);
681893e26e6SPavel Emelyanov 
682893e26e6SPavel Emelyanov 		userfaultfd_ctx_get(octx);
683a759a909SNadav Amit 		atomic_inc(&octx->mmap_changing);
684893e26e6SPavel Emelyanov 		fctx->orig = octx;
685893e26e6SPavel Emelyanov 		fctx->new = ctx;
686893e26e6SPavel Emelyanov 		list_add_tail(&fctx->list, fcs);
687893e26e6SPavel Emelyanov 	}
688893e26e6SPavel Emelyanov 
689893e26e6SPavel Emelyanov 	vma->vm_userfaultfd_ctx.ctx = ctx;
690893e26e6SPavel Emelyanov 	return 0;
691893e26e6SPavel Emelyanov }
692893e26e6SPavel Emelyanov 
6938c9e7bb7SAndrea Arcangeli static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
694893e26e6SPavel Emelyanov {
695893e26e6SPavel Emelyanov 	struct userfaultfd_ctx *ctx = fctx->orig;
696893e26e6SPavel Emelyanov 	struct userfaultfd_wait_queue ewq;
697893e26e6SPavel Emelyanov 
698893e26e6SPavel Emelyanov 	msg_init(&ewq.msg);
699893e26e6SPavel Emelyanov 
700893e26e6SPavel Emelyanov 	ewq.msg.event = UFFD_EVENT_FORK;
701893e26e6SPavel Emelyanov 	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
702893e26e6SPavel Emelyanov 
7038c9e7bb7SAndrea Arcangeli 	userfaultfd_event_wait_completion(ctx, &ewq);
704893e26e6SPavel Emelyanov }
705893e26e6SPavel Emelyanov 
706893e26e6SPavel Emelyanov void dup_userfaultfd_complete(struct list_head *fcs)
707893e26e6SPavel Emelyanov {
708893e26e6SPavel Emelyanov 	struct userfaultfd_fork_ctx *fctx, *n;
709893e26e6SPavel Emelyanov 
710893e26e6SPavel Emelyanov 	list_for_each_entry_safe(fctx, n, fcs, list) {
7118c9e7bb7SAndrea Arcangeli 		dup_fctx(fctx);
712893e26e6SPavel Emelyanov 		list_del(&fctx->list);
713893e26e6SPavel Emelyanov 		kfree(fctx);
714893e26e6SPavel Emelyanov 	}
715893e26e6SPavel Emelyanov }
716893e26e6SPavel Emelyanov 
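/*
 * For illustration, a minimal userspace sketch of consuming the fork
 * event produced by the machinery above (UFFD_FEATURE_EVENT_FORK must
 * have been negotiated via UFFDIO_API):
 *
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = msg.arg.fork.ufd;
 *		// child_uffd monitors the child's inherited ranges
 *	}
 */
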
71772f87654SPavel Emelyanov void mremap_userfaultfd_prep(struct vm_area_struct *vma,
71872f87654SPavel Emelyanov 			     struct vm_userfaultfd_ctx *vm_ctx)
71972f87654SPavel Emelyanov {
72072f87654SPavel Emelyanov 	struct userfaultfd_ctx *ctx;
72172f87654SPavel Emelyanov 
72272f87654SPavel Emelyanov 	ctx = vma->vm_userfaultfd_ctx.ctx;
7233cfd22beSPeter Xu 
7243cfd22beSPeter Xu 	if (!ctx)
7253cfd22beSPeter Xu 		return;
7263cfd22beSPeter Xu 
7273cfd22beSPeter Xu 	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
72872f87654SPavel Emelyanov 		vm_ctx->ctx = ctx;
72972f87654SPavel Emelyanov 		userfaultfd_ctx_get(ctx);
730a759a909SNadav Amit 		atomic_inc(&ctx->mmap_changing);
7313cfd22beSPeter Xu 	} else {
7323cfd22beSPeter Xu 		/* Drop uffd context if remap feature not enabled */
7333cfd22beSPeter Xu 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
7347677f7fdSAxel Rasmussen 		vma->vm_flags &= ~__VM_UFFD_FLAGS;
73572f87654SPavel Emelyanov 	}
73672f87654SPavel Emelyanov }
73772f87654SPavel Emelyanov 
73890794bf1SAndrea Arcangeli void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
73972f87654SPavel Emelyanov 				 unsigned long from, unsigned long to,
74072f87654SPavel Emelyanov 				 unsigned long len)
74172f87654SPavel Emelyanov {
74290794bf1SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
74372f87654SPavel Emelyanov 	struct userfaultfd_wait_queue ewq;
74472f87654SPavel Emelyanov 
74572f87654SPavel Emelyanov 	if (!ctx)
74672f87654SPavel Emelyanov 		return;
74772f87654SPavel Emelyanov 
74872f87654SPavel Emelyanov 	if (to & ~PAGE_MASK) {
74972f87654SPavel Emelyanov 		userfaultfd_ctx_put(ctx);
75072f87654SPavel Emelyanov 		return;
75172f87654SPavel Emelyanov 	}
75272f87654SPavel Emelyanov 
75372f87654SPavel Emelyanov 	msg_init(&ewq.msg);
75472f87654SPavel Emelyanov 
75572f87654SPavel Emelyanov 	ewq.msg.event = UFFD_EVENT_REMAP;
75672f87654SPavel Emelyanov 	ewq.msg.arg.remap.from = from;
75772f87654SPavel Emelyanov 	ewq.msg.arg.remap.to = to;
75872f87654SPavel Emelyanov 	ewq.msg.arg.remap.len = len;
75972f87654SPavel Emelyanov 
76072f87654SPavel Emelyanov 	userfaultfd_event_wait_completion(ctx, &ewq);
76172f87654SPavel Emelyanov }
76272f87654SPavel Emelyanov 
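/*
 * For illustration, the UFFD_EVENT_REMAP message sent above carries the
 * old address, the new address and the length, so a monitor can move
 * its tracking state (move_tracking() is a hypothetical helper):
 *
 *	if (msg.event == UFFD_EVENT_REMAP)
 *		move_tracking(msg.arg.remap.from,
 *			      msg.arg.remap.to,
 *			      msg.arg.remap.len);
 */
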
76370ccb92fSAndrea Arcangeli bool userfaultfd_remove(struct vm_area_struct *vma,
76405ce7724SPavel Emelyanov 			unsigned long start, unsigned long end)
76505ce7724SPavel Emelyanov {
76605ce7724SPavel Emelyanov 	struct mm_struct *mm = vma->vm_mm;
76705ce7724SPavel Emelyanov 	struct userfaultfd_ctx *ctx;
76805ce7724SPavel Emelyanov 	struct userfaultfd_wait_queue ewq;
76905ce7724SPavel Emelyanov 
77005ce7724SPavel Emelyanov 	ctx = vma->vm_userfaultfd_ctx.ctx;
771d811914dSMike Rapoport 	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
77270ccb92fSAndrea Arcangeli 		return true;
77305ce7724SPavel Emelyanov 
77405ce7724SPavel Emelyanov 	userfaultfd_ctx_get(ctx);
775a759a909SNadav Amit 	atomic_inc(&ctx->mmap_changing);
776d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
77705ce7724SPavel Emelyanov 
77805ce7724SPavel Emelyanov 	msg_init(&ewq.msg);
77905ce7724SPavel Emelyanov 
780d811914dSMike Rapoport 	ewq.msg.event = UFFD_EVENT_REMOVE;
781d811914dSMike Rapoport 	ewq.msg.arg.remove.start = start;
782d811914dSMike Rapoport 	ewq.msg.arg.remove.end = end;
78305ce7724SPavel Emelyanov 
78405ce7724SPavel Emelyanov 	userfaultfd_event_wait_completion(ctx, &ewq);
78505ce7724SPavel Emelyanov 
78670ccb92fSAndrea Arcangeli 	return false;
78705ce7724SPavel Emelyanov }
78805ce7724SPavel Emelyanov 
789897ab3e0SMike Rapoport static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
790897ab3e0SMike Rapoport 			  unsigned long start, unsigned long end)
791897ab3e0SMike Rapoport {
792897ab3e0SMike Rapoport 	struct userfaultfd_unmap_ctx *unmap_ctx;
793897ab3e0SMike Rapoport 
794897ab3e0SMike Rapoport 	list_for_each_entry(unmap_ctx, unmaps, list)
795897ab3e0SMike Rapoport 		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
796897ab3e0SMike Rapoport 		    unmap_ctx->end == end)
797897ab3e0SMike Rapoport 			return true;
798897ab3e0SMike Rapoport 
799897ab3e0SMike Rapoport 	return false;
800897ab3e0SMike Rapoport }
801897ab3e0SMike Rapoport 
802897ab3e0SMike Rapoport int userfaultfd_unmap_prep(struct vm_area_struct *vma,
803897ab3e0SMike Rapoport 			   unsigned long start, unsigned long end,
804897ab3e0SMike Rapoport 			   struct list_head *unmaps)
805897ab3e0SMike Rapoport {
806897ab3e0SMike Rapoport 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
807897ab3e0SMike Rapoport 		struct userfaultfd_unmap_ctx *unmap_ctx;
808897ab3e0SMike Rapoport 		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
809897ab3e0SMike Rapoport 
810897ab3e0SMike Rapoport 		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
811897ab3e0SMike Rapoport 		    has_unmap_ctx(ctx, unmaps, start, end))
812897ab3e0SMike Rapoport 			continue;
813897ab3e0SMike Rapoport 
814897ab3e0SMike Rapoport 		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
815897ab3e0SMike Rapoport 		if (!unmap_ctx)
816897ab3e0SMike Rapoport 			return -ENOMEM;
817897ab3e0SMike Rapoport 
818897ab3e0SMike Rapoport 		userfaultfd_ctx_get(ctx);
819a759a909SNadav Amit 		atomic_inc(&ctx->mmap_changing);
820897ab3e0SMike Rapoport 		unmap_ctx->ctx = ctx;
821897ab3e0SMike Rapoport 		unmap_ctx->start = start;
822897ab3e0SMike Rapoport 		unmap_ctx->end = end;
823897ab3e0SMike Rapoport 		list_add_tail(&unmap_ctx->list, unmaps);
824897ab3e0SMike Rapoport 	}
825897ab3e0SMike Rapoport 
826897ab3e0SMike Rapoport 	return 0;
827897ab3e0SMike Rapoport }
828897ab3e0SMike Rapoport 
829897ab3e0SMike Rapoport void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
830897ab3e0SMike Rapoport {
831897ab3e0SMike Rapoport 	struct userfaultfd_unmap_ctx *ctx, *n;
832897ab3e0SMike Rapoport 	struct userfaultfd_wait_queue ewq;
833897ab3e0SMike Rapoport 
834897ab3e0SMike Rapoport 	list_for_each_entry_safe(ctx, n, uf, list) {
835897ab3e0SMike Rapoport 		msg_init(&ewq.msg);
836897ab3e0SMike Rapoport 
837897ab3e0SMike Rapoport 		ewq.msg.event = UFFD_EVENT_UNMAP;
838897ab3e0SMike Rapoport 		ewq.msg.arg.remove.start = ctx->start;
839897ab3e0SMike Rapoport 		ewq.msg.arg.remove.end = ctx->end;
840897ab3e0SMike Rapoport 
841897ab3e0SMike Rapoport 		userfaultfd_event_wait_completion(ctx->ctx, &ewq);
842897ab3e0SMike Rapoport 
843897ab3e0SMike Rapoport 		list_del(&ctx->list);
844897ab3e0SMike Rapoport 		kfree(ctx);
845897ab3e0SMike Rapoport 	}
846897ab3e0SMike Rapoport }
847897ab3e0SMike Rapoport 
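/*
 * For illustration, UFFD_EVENT_REMOVE and UFFD_EVENT_UNMAP both report
 * the affected range in msg.arg.remove, so one monitor-side handler can
 * serve both (drop_tracking() is a hypothetical helper):
 *
 *	if (msg.event == UFFD_EVENT_REMOVE || msg.event == UFFD_EVENT_UNMAP)
 *		drop_tracking(msg.arg.remove.start, msg.arg.remove.end);
 */
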
84886039bd3SAndrea Arcangeli static int userfaultfd_release(struct inode *inode, struct file *file)
84986039bd3SAndrea Arcangeli {
85086039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
85186039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
85286039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev;
85386039bd3SAndrea Arcangeli 	/* len == 0 means wake all */
85486039bd3SAndrea Arcangeli 	struct userfaultfd_wake_range range = { .len = 0, };
85586039bd3SAndrea Arcangeli 	unsigned long new_flags;
85686039bd3SAndrea Arcangeli 
8576aa7de05SMark Rutland 	WRITE_ONCE(ctx->released, true);
85886039bd3SAndrea Arcangeli 
859d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
860d2005e3fSOleg Nesterov 		goto wakeup;
861d2005e3fSOleg Nesterov 
86286039bd3SAndrea Arcangeli 	/*
86386039bd3SAndrea Arcangeli 	 * Flush page faults out of all CPUs. NOTE: all page faults
86486039bd3SAndrea Arcangeli 	 * must be retried without returning VM_FAULT_SIGBUS if
86586039bd3SAndrea Arcangeli 	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
866c1e8d7c6SMichel Lespinasse 	 * changes while handle_userfault released the mmap_lock. So
86786039bd3SAndrea Arcangeli 	 * it's critical that released is set to true (above), before
868c1e8d7c6SMichel Lespinasse 	 * taking the mmap_lock for writing.
86986039bd3SAndrea Arcangeli 	 */
870d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
87186039bd3SAndrea Arcangeli 	prev = NULL;
87286039bd3SAndrea Arcangeli 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
87386039bd3SAndrea Arcangeli 		cond_resched();
87486039bd3SAndrea Arcangeli 		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
8757677f7fdSAxel Rasmussen 		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
87686039bd3SAndrea Arcangeli 		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
87786039bd3SAndrea Arcangeli 			prev = vma;
87886039bd3SAndrea Arcangeli 			continue;
87986039bd3SAndrea Arcangeli 		}
8807677f7fdSAxel Rasmussen 		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
88186039bd3SAndrea Arcangeli 		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
88286039bd3SAndrea Arcangeli 				 new_flags, vma->anon_vma,
88386039bd3SAndrea Arcangeli 				 vma->vm_file, vma->vm_pgoff,
88486039bd3SAndrea Arcangeli 				 vma_policy(vma),
8855c26f6acSSuren Baghdasaryan 				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
88686039bd3SAndrea Arcangeli 		if (prev)
88786039bd3SAndrea Arcangeli 			vma = prev;
88886039bd3SAndrea Arcangeli 		else
88986039bd3SAndrea Arcangeli 			prev = vma;
89086039bd3SAndrea Arcangeli 		vma->vm_flags = new_flags;
89186039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
89286039bd3SAndrea Arcangeli 	}
893d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
894d2005e3fSOleg Nesterov 	mmput(mm);
895d2005e3fSOleg Nesterov wakeup:
89686039bd3SAndrea Arcangeli 	/*
89715b726efSAndrea Arcangeli 	 * After no new page faults can wait on this fault_*wqh, flush
89886039bd3SAndrea Arcangeli 	 * the last page faults that may already have been waiting on
89915b726efSAndrea Arcangeli 	 * the fault_*wqh.
90086039bd3SAndrea Arcangeli 	 */
901cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
902ac5be6b4SAndrea Arcangeli 	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
903c430d1e8SMatthew Wilcox 	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
904cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
90586039bd3SAndrea Arcangeli 
9065a18b64eSMike Rapoport 	/* Flush pending events that may still wait on event_wqh */
9075a18b64eSMike Rapoport 	wake_up_all(&ctx->event_wqh);
9085a18b64eSMike Rapoport 
909a9a08845SLinus Torvalds 	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
91086039bd3SAndrea Arcangeli 	userfaultfd_ctx_put(ctx);
91186039bd3SAndrea Arcangeli 	return 0;
91286039bd3SAndrea Arcangeli }
91386039bd3SAndrea Arcangeli 
91415b726efSAndrea Arcangeli /* fault_pending_wqh.lock must be held by the caller */
9156dcc27fdSPavel Emelyanov static inline struct userfaultfd_wait_queue *find_userfault_in(
9166dcc27fdSPavel Emelyanov 		wait_queue_head_t *wqh)
91786039bd3SAndrea Arcangeli {
918ac6424b9SIngo Molnar 	wait_queue_entry_t *wq;
91915b726efSAndrea Arcangeli 	struct userfaultfd_wait_queue *uwq;
92086039bd3SAndrea Arcangeli 
921456a7378SLance Roy 	lockdep_assert_held(&wqh->lock);
92286039bd3SAndrea Arcangeli 
92315b726efSAndrea Arcangeli 	uwq = NULL;
9246dcc27fdSPavel Emelyanov 	if (!waitqueue_active(wqh))
92515b726efSAndrea Arcangeli 		goto out;
92615b726efSAndrea Arcangeli 	/* walk in reverse to provide FIFO behavior to read userfaults */
9272055da97SIngo Molnar 	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
92815b726efSAndrea Arcangeli 	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
92915b726efSAndrea Arcangeli out:
93015b726efSAndrea Arcangeli 	return uwq;
93186039bd3SAndrea Arcangeli }
93286039bd3SAndrea Arcangeli 
9336dcc27fdSPavel Emelyanov static inline struct userfaultfd_wait_queue *find_userfault(
9346dcc27fdSPavel Emelyanov 		struct userfaultfd_ctx *ctx)
9356dcc27fdSPavel Emelyanov {
9366dcc27fdSPavel Emelyanov 	return find_userfault_in(&ctx->fault_pending_wqh);
9376dcc27fdSPavel Emelyanov }
9386dcc27fdSPavel Emelyanov 
9399cd75c3cSPavel Emelyanov static inline struct userfaultfd_wait_queue *find_userfault_evt(
9409cd75c3cSPavel Emelyanov 		struct userfaultfd_ctx *ctx)
9419cd75c3cSPavel Emelyanov {
9429cd75c3cSPavel Emelyanov 	return find_userfault_in(&ctx->event_wqh);
9439cd75c3cSPavel Emelyanov }
9449cd75c3cSPavel Emelyanov 
945076ccb76SAl Viro static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
94686039bd3SAndrea Arcangeli {
94786039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
948076ccb76SAl Viro 	__poll_t ret;
94986039bd3SAndrea Arcangeli 
95086039bd3SAndrea Arcangeli 	poll_wait(file, &ctx->fd_wqh, wait);
95186039bd3SAndrea Arcangeli 
95222e5fe2aSNadav Amit 	if (!userfaultfd_is_initialized(ctx))
953a9a08845SLinus Torvalds 		return EPOLLERR;
95422e5fe2aSNadav Amit 
955ba85c702SAndrea Arcangeli 	/*
956ba85c702SAndrea Arcangeli 	 * poll() never guarantees that read won't block.
957ba85c702SAndrea Arcangeli 	 * userfaults can be woken before they're read().
958ba85c702SAndrea Arcangeli 	 */
959ba85c702SAndrea Arcangeli 	if (unlikely(!(file->f_flags & O_NONBLOCK)))
960a9a08845SLinus Torvalds 		return EPOLLERR;
96115b726efSAndrea Arcangeli 	/*
96215b726efSAndrea Arcangeli 	 * Lockless access to see if there are pending faults:
96315b726efSAndrea Arcangeli 	 * __pollwait's last action is the add_wait_queue, but
96415b726efSAndrea Arcangeli 	 * the spin_unlock would allow the waitqueue_active to
96515b726efSAndrea Arcangeli 	 * pass above the actual list_add inside the
96615b726efSAndrea Arcangeli 	 * add_wait_queue critical section. So use a full
96715b726efSAndrea Arcangeli 	 * memory barrier to serialize the list_add write of
96815b726efSAndrea Arcangeli 	 * add_wait_queue() with the waitqueue_active read
96915b726efSAndrea Arcangeli 	 * below.
97015b726efSAndrea Arcangeli 	 */
97115b726efSAndrea Arcangeli 	ret = 0;
97215b726efSAndrea Arcangeli 	smp_mb();
97315b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_pending_wqh))
974a9a08845SLinus Torvalds 		ret = EPOLLIN;
9759cd75c3cSPavel Emelyanov 	else if (waitqueue_active(&ctx->event_wqh))
976a9a08845SLinus Torvalds 		ret = EPOLLIN;
9779cd75c3cSPavel Emelyanov 
97886039bd3SAndrea Arcangeli 	return ret;
97986039bd3SAndrea Arcangeli }
98086039bd3SAndrea Arcangeli 
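/*
 * For illustration, userfaultfd_poll() above returns EPOLLERR unless
 * the fd is non-blocking and UFFD_API has completed, so a minimal
 * monitor loop looks like this (handle_messages() is a hypothetical
 * helper):
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	...UFFDIO_API and UFFDIO_REGISTER ioctls...
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_messages(uffd);
 */
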
981893e26e6SPavel Emelyanov static const struct file_operations userfaultfd_fops;
982893e26e6SPavel Emelyanov 
983b537900fSDaniel Colascione static int resolve_userfault_fork(struct userfaultfd_ctx *new,
984b537900fSDaniel Colascione 				  struct inode *inode,
985893e26e6SPavel Emelyanov 				  struct uffd_msg *msg)
986893e26e6SPavel Emelyanov {
987893e26e6SPavel Emelyanov 	int fd;
988893e26e6SPavel Emelyanov 
989b537900fSDaniel Colascione 	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
990b537900fSDaniel Colascione 			O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
991893e26e6SPavel Emelyanov 	if (fd < 0)
992893e26e6SPavel Emelyanov 		return fd;
993893e26e6SPavel Emelyanov 
994893e26e6SPavel Emelyanov 	msg->arg.reserved.reserved1 = 0;
995893e26e6SPavel Emelyanov 	msg->arg.fork.ufd = fd;
996893e26e6SPavel Emelyanov 	return 0;
997893e26e6SPavel Emelyanov }
998893e26e6SPavel Emelyanov 
99986039bd3SAndrea Arcangeli static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1000b537900fSDaniel Colascione 				    struct uffd_msg *msg, struct inode *inode)
100186039bd3SAndrea Arcangeli {
100286039bd3SAndrea Arcangeli 	ssize_t ret;
100386039bd3SAndrea Arcangeli 	DECLARE_WAITQUEUE(wait, current);
100415b726efSAndrea Arcangeli 	struct userfaultfd_wait_queue *uwq;
1005893e26e6SPavel Emelyanov 	/*
1006893e26e6SPavel Emelyanov 	 * Handling a fork event requires sleeping operations, so
1007893e26e6SPavel Emelyanov 	 * we drop the event_wqh lock, then do these ops, then
1008893e26e6SPavel Emelyanov 	 * lock it back and wake up the waiter. While the lock is
1009893e26e6SPavel Emelyanov 	 * dropped the ewq may go away so we keep track of it
1010893e26e6SPavel Emelyanov 	 * carefully.
1011893e26e6SPavel Emelyanov 	 */
1012893e26e6SPavel Emelyanov 	LIST_HEAD(fork_event);
1013893e26e6SPavel Emelyanov 	struct userfaultfd_ctx *fork_nctx = NULL;
101486039bd3SAndrea Arcangeli 
101515b726efSAndrea Arcangeli 	/* always take the fd_wqh lock before the fault_pending_wqh lock */
1016ae62c16eSChristoph Hellwig 	spin_lock_irq(&ctx->fd_wqh.lock);
101786039bd3SAndrea Arcangeli 	__add_wait_queue(&ctx->fd_wqh, &wait);
101886039bd3SAndrea Arcangeli 	for (;;) {
101986039bd3SAndrea Arcangeli 		set_current_state(TASK_INTERRUPTIBLE);
102015b726efSAndrea Arcangeli 		spin_lock(&ctx->fault_pending_wqh.lock);
102115b726efSAndrea Arcangeli 		uwq = find_userfault(ctx);
102215b726efSAndrea Arcangeli 		if (uwq) {
102386039bd3SAndrea Arcangeli 			/*
10242c5b7e1bSAndrea Arcangeli 			 * Use a seqcount to repeat the lockless check
10252c5b7e1bSAndrea Arcangeli 			 * in wake_userfault() to avoid missing
10262c5b7e1bSAndrea Arcangeli 			 * wakeups because during the refile both
10272c5b7e1bSAndrea Arcangeli 			 * waitqueues could become empty if this is the
10282c5b7e1bSAndrea Arcangeli 			 * only userfault.
10292c5b7e1bSAndrea Arcangeli 			 */
10302c5b7e1bSAndrea Arcangeli 			write_seqcount_begin(&ctx->refile_seq);
10312c5b7e1bSAndrea Arcangeli 
10322c5b7e1bSAndrea Arcangeli 			/*
103315b726efSAndrea Arcangeli 			 * The fault_pending_wqh.lock prevents the uwq
103415b726efSAndrea Arcangeli 			 * to disappear from under us.
103515b726efSAndrea Arcangeli 			 *
103615b726efSAndrea Arcangeli 			 * Refile this userfault from
103715b726efSAndrea Arcangeli 			 * fault_pending_wqh to fault_wqh, it's not
103815b726efSAndrea Arcangeli 			 * pending anymore after we read it.
103915b726efSAndrea Arcangeli 			 *
104015b726efSAndrea Arcangeli 			 * Use list_del() by hand (as
104115b726efSAndrea Arcangeli 			 * userfaultfd_wake_function also uses
104215b726efSAndrea Arcangeli 			 * list_del_init() by hand) to be sure nobody
104315b726efSAndrea Arcangeli 			 * changes __remove_wait_queue() to use
104415b726efSAndrea Arcangeli 			 * list_del_init(), which would in turn break the
104515b726efSAndrea Arcangeli 			 * !list_empty_careful() check in
10462055da97SIngo Molnar 			 * handle_userfault(). The uwq->wq.head list
104715b726efSAndrea Arcangeli 			 * must never be empty at any time during the
104815b726efSAndrea Arcangeli 			 * refile, or the waitqueue could disappear
104915b726efSAndrea Arcangeli 			 * from under us. The "wait_queue_head_t"
105015b726efSAndrea Arcangeli 			 * parameter of __remove_wait_queue() is unused
105115b726efSAndrea Arcangeli 			 * anyway.
105286039bd3SAndrea Arcangeli 			 */
10532055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1054c430d1e8SMatthew Wilcox 			add_wait_queue(&ctx->fault_wqh, &uwq->wq);
105515b726efSAndrea Arcangeli 
10562c5b7e1bSAndrea Arcangeli 			write_seqcount_end(&ctx->refile_seq);
10572c5b7e1bSAndrea Arcangeli 
1058a9b85f94SAndrea Arcangeli 			/* careful to always initialize msg if ret == 0 */
1059a9b85f94SAndrea Arcangeli 			*msg = uwq->msg;
106015b726efSAndrea Arcangeli 			spin_unlock(&ctx->fault_pending_wqh.lock);
106186039bd3SAndrea Arcangeli 			ret = 0;
106286039bd3SAndrea Arcangeli 			break;
106386039bd3SAndrea Arcangeli 		}
106415b726efSAndrea Arcangeli 		spin_unlock(&ctx->fault_pending_wqh.lock);
10659cd75c3cSPavel Emelyanov 
10669cd75c3cSPavel Emelyanov 		spin_lock(&ctx->event_wqh.lock);
10679cd75c3cSPavel Emelyanov 		uwq = find_userfault_evt(ctx);
10689cd75c3cSPavel Emelyanov 		if (uwq) {
10699cd75c3cSPavel Emelyanov 			*msg = uwq->msg;
10709cd75c3cSPavel Emelyanov 
1071893e26e6SPavel Emelyanov 			if (uwq->msg.event == UFFD_EVENT_FORK) {
1072893e26e6SPavel Emelyanov 				fork_nctx = (struct userfaultfd_ctx *)
1073893e26e6SPavel Emelyanov 					(unsigned long)
1074893e26e6SPavel Emelyanov 					uwq->msg.arg.reserved.reserved1;
10752055da97SIngo Molnar 				list_move(&uwq->wq.entry, &fork_event);
1076384632e6SAndrea Arcangeli 				/*
1077384632e6SAndrea Arcangeli 				 * fork_nctx can be freed as soon as
1078384632e6SAndrea Arcangeli 				 * we drop the lock, unless we take a
1079384632e6SAndrea Arcangeli 				 * reference on it.
1080384632e6SAndrea Arcangeli 				 */
1081384632e6SAndrea Arcangeli 				userfaultfd_ctx_get(fork_nctx);
1082893e26e6SPavel Emelyanov 				spin_unlock(&ctx->event_wqh.lock);
1083893e26e6SPavel Emelyanov 				ret = 0;
1084893e26e6SPavel Emelyanov 				break;
1085893e26e6SPavel Emelyanov 			}
1086893e26e6SPavel Emelyanov 
10879cd75c3cSPavel Emelyanov 			userfaultfd_event_complete(ctx, uwq);
10889cd75c3cSPavel Emelyanov 			spin_unlock(&ctx->event_wqh.lock);
10899cd75c3cSPavel Emelyanov 			ret = 0;
10909cd75c3cSPavel Emelyanov 			break;
10919cd75c3cSPavel Emelyanov 		}
10929cd75c3cSPavel Emelyanov 		spin_unlock(&ctx->event_wqh.lock);
10939cd75c3cSPavel Emelyanov 
109486039bd3SAndrea Arcangeli 		if (signal_pending(current)) {
109586039bd3SAndrea Arcangeli 			ret = -ERESTARTSYS;
109686039bd3SAndrea Arcangeli 			break;
109786039bd3SAndrea Arcangeli 		}
109886039bd3SAndrea Arcangeli 		if (no_wait) {
109986039bd3SAndrea Arcangeli 			ret = -EAGAIN;
110086039bd3SAndrea Arcangeli 			break;
110186039bd3SAndrea Arcangeli 		}
1102ae62c16eSChristoph Hellwig 		spin_unlock_irq(&ctx->fd_wqh.lock);
110386039bd3SAndrea Arcangeli 		schedule();
1104ae62c16eSChristoph Hellwig 		spin_lock_irq(&ctx->fd_wqh.lock);
110586039bd3SAndrea Arcangeli 	}
110686039bd3SAndrea Arcangeli 	__remove_wait_queue(&ctx->fd_wqh, &wait);
110786039bd3SAndrea Arcangeli 	__set_current_state(TASK_RUNNING);
1108ae62c16eSChristoph Hellwig 	spin_unlock_irq(&ctx->fd_wqh.lock);
110986039bd3SAndrea Arcangeli 
1110893e26e6SPavel Emelyanov 	if (!ret && msg->event == UFFD_EVENT_FORK) {
1111b537900fSDaniel Colascione 		ret = resolve_userfault_fork(fork_nctx, inode, msg);
1112cbcfa130SEric Biggers 		spin_lock_irq(&ctx->event_wqh.lock);
1113893e26e6SPavel Emelyanov 		if (!list_empty(&fork_event)) {
1114384632e6SAndrea Arcangeli 			/*
1115384632e6SAndrea Arcangeli 			 * The fork thread didn't abort, so we can
1116384632e6SAndrea Arcangeli 			 * drop the temporary refcount.
1117384632e6SAndrea Arcangeli 			 */
1118384632e6SAndrea Arcangeli 			userfaultfd_ctx_put(fork_nctx);
1119384632e6SAndrea Arcangeli 
1120893e26e6SPavel Emelyanov 			uwq = list_first_entry(&fork_event,
1121893e26e6SPavel Emelyanov 					       typeof(*uwq),
11222055da97SIngo Molnar 					       wq.entry);
1123384632e6SAndrea Arcangeli 			/*
1124384632e6SAndrea Arcangeli 			 * If the fork_event list wasn't empty, the
1125384632e6SAndrea Arcangeli 			 * event wasn't already released by fork
1126384632e6SAndrea Arcangeli 			 * (the event is allocated on the fork kernel
1127384632e6SAndrea Arcangeli 			 * stack), so put the event back in its place
1128384632e6SAndrea Arcangeli 			 * in the event_wqh. The fork_event head will
1129384632e6SAndrea Arcangeli 			 * be freed as soon as we return, so the event
1130384632e6SAndrea Arcangeli 			 * cannot stay queued there no matter the
1131384632e6SAndrea Arcangeli 			 * current "ret" value.
1132384632e6SAndrea Arcangeli 			 */
11332055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1134893e26e6SPavel Emelyanov 			__add_wait_queue(&ctx->event_wqh, &uwq->wq);
1135384632e6SAndrea Arcangeli 
1136384632e6SAndrea Arcangeli 			/*
1137384632e6SAndrea Arcangeli 			 * Leave the event in the waitqueue and report
1138384632e6SAndrea Arcangeli 			 * error to userland if we failed to resolve
1139384632e6SAndrea Arcangeli 			 * the userfault fork.
1140384632e6SAndrea Arcangeli 			 */
1141384632e6SAndrea Arcangeli 			if (likely(!ret))
1142893e26e6SPavel Emelyanov 				userfaultfd_event_complete(ctx, uwq);
1143384632e6SAndrea Arcangeli 		} else {
1144384632e6SAndrea Arcangeli 			/*
1145384632e6SAndrea Arcangeli 			 * Here the fork thread aborted and the
1146384632e6SAndrea Arcangeli 			 * refcount from the fork thread on fork_nctx
1147384632e6SAndrea Arcangeli 			 * has already been released. We still hold
1148384632e6SAndrea Arcangeli 			 * the reference we took before releasing the
1149384632e6SAndrea Arcangeli 			 * lock above. If resolve_userfault_fork
1150384632e6SAndrea Arcangeli 			 * failed we have to drop it, because the
1151384632e6SAndrea Arcangeli 			 * fork_nctx has to be freed in that case. If
1152384632e6SAndrea Arcangeli 			 * it succeeded we keep holding it, because the
1153384632e6SAndrea Arcangeli 			 * new uffd references it.
1154384632e6SAndrea Arcangeli 			 */
1155384632e6SAndrea Arcangeli 			if (ret)
1156384632e6SAndrea Arcangeli 				userfaultfd_ctx_put(fork_nctx);
1157893e26e6SPavel Emelyanov 		}
1158cbcfa130SEric Biggers 		spin_unlock_irq(&ctx->event_wqh.lock);
1159893e26e6SPavel Emelyanov 	}
1160893e26e6SPavel Emelyanov 
116186039bd3SAndrea Arcangeli 	return ret;
116286039bd3SAndrea Arcangeli }
116386039bd3SAndrea Arcangeli 
116486039bd3SAndrea Arcangeli static ssize_t userfaultfd_read(struct file *file, char __user *buf,
116586039bd3SAndrea Arcangeli 				size_t count, loff_t *ppos)
116686039bd3SAndrea Arcangeli {
116786039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
116886039bd3SAndrea Arcangeli 	ssize_t _ret, ret = 0;
1169a9b85f94SAndrea Arcangeli 	struct uffd_msg msg;
117086039bd3SAndrea Arcangeli 	int no_wait = file->f_flags & O_NONBLOCK;
1171b537900fSDaniel Colascione 	struct inode *inode = file_inode(file);
117286039bd3SAndrea Arcangeli 
117322e5fe2aSNadav Amit 	if (!userfaultfd_is_initialized(ctx))
117486039bd3SAndrea Arcangeli 		return -EINVAL;
117586039bd3SAndrea Arcangeli 
117686039bd3SAndrea Arcangeli 	for (;;) {
1177a9b85f94SAndrea Arcangeli 		if (count < sizeof(msg))
117886039bd3SAndrea Arcangeli 			return ret ? ret : -EINVAL;
1179b537900fSDaniel Colascione 		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
118086039bd3SAndrea Arcangeli 		if (_ret < 0)
118186039bd3SAndrea Arcangeli 			return ret ? ret : _ret;
1182a9b85f94SAndrea Arcangeli 		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
118386039bd3SAndrea Arcangeli 			return ret ? ret : -EFAULT;
1184a9b85f94SAndrea Arcangeli 		ret += sizeof(msg);
1185a9b85f94SAndrea Arcangeli 		buf += sizeof(msg);
1186a9b85f94SAndrea Arcangeli 		count -= sizeof(msg);
118786039bd3SAndrea Arcangeli 		/*
118886039bd3SAndrea Arcangeli 		 * Allow reading more than one fault at a time, but only
118986039bd3SAndrea Arcangeli 		 * block when waiting for the very first one.
119086039bd3SAndrea Arcangeli 		 */
119186039bd3SAndrea Arcangeli 		no_wait = O_NONBLOCK;
119286039bd3SAndrea Arcangeli 	}
119386039bd3SAndrea Arcangeli }
119486039bd3SAndrea Arcangeli 
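/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * read protocol implemented above: each read() returns one or more whole
 * struct uffd_msg, and a buffer shorter than one message fails with
 * -EINVAL. The names come from the uapi <linux/userfaultfd.h>; the fd
 * setup is assumed to have happened elsewhere.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <unistd.h>
 *
 *	static int read_one_event(int ufd, struct uffd_msg *msg)
 *	{
 *		ssize_t n = read(ufd, msg, sizeof(*msg));
 *
 *		if (n != sizeof(*msg))
 *			return -1;	// n < 0: real error, e.g. EAGAIN with O_NONBLOCK
 *		if (msg->event == UFFD_EVENT_PAGEFAULT) {
 *			// msg->arg.pagefault.address is the faulting address
 *			return 0;
 *		}
 *		return 1;	// fork/remap/remove/unmap event
 *	}
 */
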
119586039bd3SAndrea Arcangeli static void __wake_userfault(struct userfaultfd_ctx *ctx,
119686039bd3SAndrea Arcangeli 			     struct userfaultfd_wake_range *range)
119786039bd3SAndrea Arcangeli {
1198cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
119986039bd3SAndrea Arcangeli 	/* wake all in the range and autoremove */
120015b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_pending_wqh))
1201ac5be6b4SAndrea Arcangeli 		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
120215b726efSAndrea Arcangeli 				     range);
120315b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_wqh))
1204c430d1e8SMatthew Wilcox 		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1205cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
120686039bd3SAndrea Arcangeli }
120786039bd3SAndrea Arcangeli 
120886039bd3SAndrea Arcangeli static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
120986039bd3SAndrea Arcangeli 					   struct userfaultfd_wake_range *range)
121086039bd3SAndrea Arcangeli {
12112c5b7e1bSAndrea Arcangeli 	unsigned seq;
12122c5b7e1bSAndrea Arcangeli 	bool need_wakeup;
12132c5b7e1bSAndrea Arcangeli 
121486039bd3SAndrea Arcangeli 	/*
121586039bd3SAndrea Arcangeli 	 * To be sure waitqueue_active() is not reordered by the CPU
121686039bd3SAndrea Arcangeli 	 * before the pagetable update, use an explicit SMP memory
12173e4e28c5SMichel Lespinasse 	 * barrier here. PT lock release or mmap_read_unlock(mm) only
121886039bd3SAndrea Arcangeli 	 * have release semantics, which would still allow the
121986039bd3SAndrea Arcangeli 	 * waitqueue_active() to be reordered before the pte update.
122086039bd3SAndrea Arcangeli 	 */
122186039bd3SAndrea Arcangeli 	smp_mb();
122286039bd3SAndrea Arcangeli 
122386039bd3SAndrea Arcangeli 	/*
122486039bd3SAndrea Arcangeli 	 * Use waitqueue_active because it's very common to
122586039bd3SAndrea Arcangeli 	 * change the address space atomically even when there are
122686039bd3SAndrea Arcangeli 	 * no userfaults yet. So we take the spinlock only when
122786039bd3SAndrea Arcangeli 	 * we're sure we have userfaults to wake.
122886039bd3SAndrea Arcangeli 	 */
12292c5b7e1bSAndrea Arcangeli 	do {
12302c5b7e1bSAndrea Arcangeli 		seq = read_seqcount_begin(&ctx->refile_seq);
12312c5b7e1bSAndrea Arcangeli 		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
12322c5b7e1bSAndrea Arcangeli 			waitqueue_active(&ctx->fault_wqh);
12332c5b7e1bSAndrea Arcangeli 		cond_resched();
12342c5b7e1bSAndrea Arcangeli 	} while (read_seqcount_retry(&ctx->refile_seq, seq));
12352c5b7e1bSAndrea Arcangeli 	if (need_wakeup)
123686039bd3SAndrea Arcangeli 		__wake_userfault(ctx, range);
123786039bd3SAndrea Arcangeli }
123886039bd3SAndrea Arcangeli 
123986039bd3SAndrea Arcangeli static __always_inline int validate_range(struct mm_struct *mm,
1240e71e2aceSPeter Collingbourne 					  __u64 start, __u64 len)
124186039bd3SAndrea Arcangeli {
124286039bd3SAndrea Arcangeli 	__u64 task_size = mm->task_size;
124386039bd3SAndrea Arcangeli 
1244e71e2aceSPeter Collingbourne 	if (start & ~PAGE_MASK)
124586039bd3SAndrea Arcangeli 		return -EINVAL;
124686039bd3SAndrea Arcangeli 	if (len & ~PAGE_MASK)
124786039bd3SAndrea Arcangeli 		return -EINVAL;
124886039bd3SAndrea Arcangeli 	if (!len)
124986039bd3SAndrea Arcangeli 		return -EINVAL;
1250e71e2aceSPeter Collingbourne 	if (start < mmap_min_addr)
125186039bd3SAndrea Arcangeli 		return -EINVAL;
1252e71e2aceSPeter Collingbourne 	if (start >= task_size)
125386039bd3SAndrea Arcangeli 		return -EINVAL;
1254e71e2aceSPeter Collingbourne 	if (len > task_size - start)
125586039bd3SAndrea Arcangeli 		return -EINVAL;
125686039bd3SAndrea Arcangeli 	return 0;
125786039bd3SAndrea Arcangeli }
125886039bd3SAndrea Arcangeli 
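/*
 * Worked example for the bounds checks above (illustrative): with 4K
 * pages, start = task_size - 4096 and len = 8192 fails the
 * "len > task_size - start" test (8192 > 4096). The subtraction cannot
 * underflow because "start >= task_size" was already rejected, and this
 * form avoids the wraparound a naive "start + len > task_size" check
 * would suffer for huge len values.
 */
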
125986039bd3SAndrea Arcangeli static int userfaultfd_register(struct userfaultfd_ctx *ctx,
126086039bd3SAndrea Arcangeli 				unsigned long arg)
126186039bd3SAndrea Arcangeli {
126286039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
126386039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev, *cur;
126486039bd3SAndrea Arcangeli 	int ret;
126586039bd3SAndrea Arcangeli 	struct uffdio_register uffdio_register;
126686039bd3SAndrea Arcangeli 	struct uffdio_register __user *user_uffdio_register;
126786039bd3SAndrea Arcangeli 	unsigned long vm_flags, new_flags;
126886039bd3SAndrea Arcangeli 	bool found;
1269ce53e8e6SMike Rapoport 	bool basic_ioctls;
127086039bd3SAndrea Arcangeli 	unsigned long start, end, vma_end;
127186039bd3SAndrea Arcangeli 
127286039bd3SAndrea Arcangeli 	user_uffdio_register = (struct uffdio_register __user *) arg;
127386039bd3SAndrea Arcangeli 
127486039bd3SAndrea Arcangeli 	ret = -EFAULT;
127586039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_register, user_uffdio_register,
127686039bd3SAndrea Arcangeli 			   sizeof(uffdio_register)-sizeof(__u64)))
127786039bd3SAndrea Arcangeli 		goto out;
127886039bd3SAndrea Arcangeli 
127986039bd3SAndrea Arcangeli 	ret = -EINVAL;
128086039bd3SAndrea Arcangeli 	if (!uffdio_register.mode)
128186039bd3SAndrea Arcangeli 		goto out;
12827677f7fdSAxel Rasmussen 	if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
128386039bd3SAndrea Arcangeli 		goto out;
128486039bd3SAndrea Arcangeli 	vm_flags = 0;
128586039bd3SAndrea Arcangeli 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
128686039bd3SAndrea Arcangeli 		vm_flags |= VM_UFFD_MISSING;
128700b151f2SPeter Xu 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
128800b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
128900b151f2SPeter Xu 		goto out;
129000b151f2SPeter Xu #endif
129186039bd3SAndrea Arcangeli 		vm_flags |= VM_UFFD_WP;
129200b151f2SPeter Xu 	}
12937677f7fdSAxel Rasmussen 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
12947677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
12957677f7fdSAxel Rasmussen 		goto out;
12967677f7fdSAxel Rasmussen #endif
12977677f7fdSAxel Rasmussen 		vm_flags |= VM_UFFD_MINOR;
12987677f7fdSAxel Rasmussen 	}
129986039bd3SAndrea Arcangeli 
1300e71e2aceSPeter Collingbourne 	ret = validate_range(mm, uffdio_register.range.start,
130186039bd3SAndrea Arcangeli 			     uffdio_register.range.len);
130286039bd3SAndrea Arcangeli 	if (ret)
130386039bd3SAndrea Arcangeli 		goto out;
130486039bd3SAndrea Arcangeli 
130586039bd3SAndrea Arcangeli 	start = uffdio_register.range.start;
130686039bd3SAndrea Arcangeli 	end = start + uffdio_register.range.len;
130786039bd3SAndrea Arcangeli 
1308d2005e3fSOleg Nesterov 	ret = -ENOMEM;
1309d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
1310d2005e3fSOleg Nesterov 		goto out;
1311d2005e3fSOleg Nesterov 
1312d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
131386039bd3SAndrea Arcangeli 	vma = find_vma_prev(mm, start, &prev);
131486039bd3SAndrea Arcangeli 	if (!vma)
131586039bd3SAndrea Arcangeli 		goto out_unlock;
131686039bd3SAndrea Arcangeli 
131786039bd3SAndrea Arcangeli 	/* check that there's at least one vma in the range */
131886039bd3SAndrea Arcangeli 	ret = -EINVAL;
131986039bd3SAndrea Arcangeli 	if (vma->vm_start >= end)
132086039bd3SAndrea Arcangeli 		goto out_unlock;
132186039bd3SAndrea Arcangeli 
132286039bd3SAndrea Arcangeli 	/*
1323cab350afSMike Kravetz 	 * If the first vma contains huge pages, make sure start address
1324cab350afSMike Kravetz 	 * is aligned to huge page size.
1325cab350afSMike Kravetz 	 */
1326cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1327cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1328cab350afSMike Kravetz 
1329cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1330cab350afSMike Kravetz 			goto out_unlock;
1331cab350afSMike Kravetz 	}
1332cab350afSMike Kravetz 
1333cab350afSMike Kravetz 	/*
133486039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
133586039bd3SAndrea Arcangeli 	 */
133686039bd3SAndrea Arcangeli 	found = false;
1337ce53e8e6SMike Rapoport 	basic_ioctls = false;
133886039bd3SAndrea Arcangeli 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
133986039bd3SAndrea Arcangeli 		cond_resched();
134086039bd3SAndrea Arcangeli 
134186039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
13427677f7fdSAxel Rasmussen 		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
134386039bd3SAndrea Arcangeli 
134486039bd3SAndrea Arcangeli 		/* check for incompatible vmas */
134586039bd3SAndrea Arcangeli 		ret = -EINVAL;
134663b2d417SAndrea Arcangeli 		if (!vma_can_userfault(cur, vm_flags))
134786039bd3SAndrea Arcangeli 			goto out_unlock;
134829ec9066SAndrea Arcangeli 
134929ec9066SAndrea Arcangeli 		/*
135029ec9066SAndrea Arcangeli 		 * UFFDIO_COPY will fill file holes even without
135129ec9066SAndrea Arcangeli 		 * PROT_WRITE. This check enforces that if this is a
135229ec9066SAndrea Arcangeli 		 * MAP_SHARED mapping, the process has write permission to the
135329ec9066SAndrea Arcangeli 		 * backing file. If VM_MAYWRITE is set it also enforces that
135429ec9066SAndrea Arcangeli 		 * on a MAP_SHARED vma: there is no F_SEAL_WRITE and no
135529ec9066SAndrea Arcangeli 		 * further F_SEAL_WRITE can be taken until the vma is destroyed.
135629ec9066SAndrea Arcangeli 		 */
135729ec9066SAndrea Arcangeli 		ret = -EPERM;
135829ec9066SAndrea Arcangeli 		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
135929ec9066SAndrea Arcangeli 			goto out_unlock;
136029ec9066SAndrea Arcangeli 
1361cab350afSMike Kravetz 		/*
1362cab350afSMike Kravetz 		 * If this vma contains the ending address and is a hugetlb
1363cab350afSMike Kravetz 		 * vma, check that the end is huge page aligned.
1364cab350afSMike Kravetz 		 */
1365cab350afSMike Kravetz 		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1366cab350afSMike Kravetz 		    end > cur->vm_start) {
1367cab350afSMike Kravetz 			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1368cab350afSMike Kravetz 
1369cab350afSMike Kravetz 			ret = -EINVAL;
1370cab350afSMike Kravetz 
1371cab350afSMike Kravetz 			if (end & (vma_hpagesize - 1))
1372cab350afSMike Kravetz 				goto out_unlock;
1373cab350afSMike Kravetz 		}
137463b2d417SAndrea Arcangeli 		if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
137563b2d417SAndrea Arcangeli 			goto out_unlock;
137686039bd3SAndrea Arcangeli 
137786039bd3SAndrea Arcangeli 		/*
137886039bd3SAndrea Arcangeli 		 * Check that this vma isn't already owned by a
137986039bd3SAndrea Arcangeli 		 * different userfaultfd. We can't allow more than one
138086039bd3SAndrea Arcangeli 		 * userfaultfd to own a single vma simultaneously or we
138186039bd3SAndrea Arcangeli 		 * wouldn't know which one to deliver the userfaults to.
138286039bd3SAndrea Arcangeli 		 */
138386039bd3SAndrea Arcangeli 		ret = -EBUSY;
138486039bd3SAndrea Arcangeli 		if (cur->vm_userfaultfd_ctx.ctx &&
138586039bd3SAndrea Arcangeli 		    cur->vm_userfaultfd_ctx.ctx != ctx)
138686039bd3SAndrea Arcangeli 			goto out_unlock;
138786039bd3SAndrea Arcangeli 
1388cab350afSMike Kravetz 		/*
1389cab350afSMike Kravetz 		 * Note if any vma in the range contains huge pages.
1390cab350afSMike Kravetz 		 */
1391ce53e8e6SMike Rapoport 		if (is_vm_hugetlb_page(cur))
1392ce53e8e6SMike Rapoport 			basic_ioctls = true;
1393cab350afSMike Kravetz 
139486039bd3SAndrea Arcangeli 		found = true;
139586039bd3SAndrea Arcangeli 	}
139686039bd3SAndrea Arcangeli 	BUG_ON(!found);
139786039bd3SAndrea Arcangeli 
139886039bd3SAndrea Arcangeli 	if (vma->vm_start < start)
139986039bd3SAndrea Arcangeli 		prev = vma;
140086039bd3SAndrea Arcangeli 
140186039bd3SAndrea Arcangeli 	ret = 0;
140286039bd3SAndrea Arcangeli 	do {
140386039bd3SAndrea Arcangeli 		cond_resched();
140486039bd3SAndrea Arcangeli 
140563b2d417SAndrea Arcangeli 		BUG_ON(!vma_can_userfault(vma, vm_flags));
140686039bd3SAndrea Arcangeli 		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
140786039bd3SAndrea Arcangeli 		       vma->vm_userfaultfd_ctx.ctx != ctx);
140829ec9066SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
140986039bd3SAndrea Arcangeli 
141086039bd3SAndrea Arcangeli 		/*
141186039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is already registered with this
141286039bd3SAndrea Arcangeli 		 * userfaultfd, and with the right tracking mode too.
141386039bd3SAndrea Arcangeli 		 */
141486039bd3SAndrea Arcangeli 		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
141586039bd3SAndrea Arcangeli 		    (vma->vm_flags & vm_flags) == vm_flags)
141686039bd3SAndrea Arcangeli 			goto skip;
141786039bd3SAndrea Arcangeli 
141886039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
141986039bd3SAndrea Arcangeli 			start = vma->vm_start;
142086039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
142186039bd3SAndrea Arcangeli 
14227677f7fdSAxel Rasmussen 		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
142386039bd3SAndrea Arcangeli 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
142486039bd3SAndrea Arcangeli 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
142586039bd3SAndrea Arcangeli 				 vma_policy(vma),
14269a10064fSColin Cross 				 ((struct vm_userfaultfd_ctx){ ctx }),
14275c26f6acSSuren Baghdasaryan 				 anon_vma_name(vma));
142886039bd3SAndrea Arcangeli 		if (prev) {
142986039bd3SAndrea Arcangeli 			vma = prev;
143086039bd3SAndrea Arcangeli 			goto next;
143186039bd3SAndrea Arcangeli 		}
143286039bd3SAndrea Arcangeli 		if (vma->vm_start < start) {
143386039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, start, 1);
143486039bd3SAndrea Arcangeli 			if (ret)
143586039bd3SAndrea Arcangeli 				break;
143686039bd3SAndrea Arcangeli 		}
143786039bd3SAndrea Arcangeli 		if (vma->vm_end > end) {
143886039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, end, 0);
143986039bd3SAndrea Arcangeli 			if (ret)
144086039bd3SAndrea Arcangeli 				break;
144186039bd3SAndrea Arcangeli 		}
144286039bd3SAndrea Arcangeli 	next:
144386039bd3SAndrea Arcangeli 		/*
144486039bd3SAndrea Arcangeli 		 * In the vma_merge() successful mprotect-like case 8:
144586039bd3SAndrea Arcangeli 		 * the next vma was merged into the current one and
144686039bd3SAndrea Arcangeli 		 * the current one has not been updated yet.
144786039bd3SAndrea Arcangeli 		 */
144886039bd3SAndrea Arcangeli 		vma->vm_flags = new_flags;
144986039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx.ctx = ctx;
145086039bd3SAndrea Arcangeli 
14516dfeaff9SPeter Xu 		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
14526dfeaff9SPeter Xu 			hugetlb_unshare_all_pmds(vma);
14536dfeaff9SPeter Xu 
145486039bd3SAndrea Arcangeli 	skip:
145586039bd3SAndrea Arcangeli 		prev = vma;
145686039bd3SAndrea Arcangeli 		start = vma->vm_end;
145786039bd3SAndrea Arcangeli 		vma = vma->vm_next;
145886039bd3SAndrea Arcangeli 	} while (vma && vma->vm_start < end);
145986039bd3SAndrea Arcangeli out_unlock:
1460d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1461d2005e3fSOleg Nesterov 	mmput(mm);
146286039bd3SAndrea Arcangeli 	if (!ret) {
146314819305SPeter Xu 		__u64 ioctls_out;
146414819305SPeter Xu 
146514819305SPeter Xu 		ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
146614819305SPeter Xu 		    UFFD_API_RANGE_IOCTLS;
146714819305SPeter Xu 
146814819305SPeter Xu 		/*
146914819305SPeter Xu 		 * Declare the WP ioctl only if the WP mode was
147014819305SPeter Xu 		 * requested and all checks on the range passed.
147114819305SPeter Xu 		 */
147214819305SPeter Xu 		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
147314819305SPeter Xu 			ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
147414819305SPeter Xu 
1475f6191471SAxel Rasmussen 		/* CONTINUE ioctl is only supported for MINOR ranges. */
1476f6191471SAxel Rasmussen 		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1477f6191471SAxel Rasmussen 			ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1478f6191471SAxel Rasmussen 
147986039bd3SAndrea Arcangeli 		/*
148086039bd3SAndrea Arcangeli 		 * Now that we scanned all vmas we can already tell
148186039bd3SAndrea Arcangeli 		 * userland which ioctl methods are guaranteed to
148286039bd3SAndrea Arcangeli 		 * succeed on this range.
148386039bd3SAndrea Arcangeli 		 */
148414819305SPeter Xu 		if (put_user(ioctls_out, &user_uffdio_register->ioctls))
148586039bd3SAndrea Arcangeli 			ret = -EFAULT;
148686039bd3SAndrea Arcangeli 	}
148786039bd3SAndrea Arcangeli out:
148886039bd3SAndrea Arcangeli 	return ret;
148986039bd3SAndrea Arcangeli }
149086039bd3SAndrea Arcangeli 
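/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * UFFDIO_REGISTER path above: register a page-aligned range for
 * missing-page tracking, then inspect which range ioctls the kernel
 * guarantees. "area" and "len" are assumptions of the example.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int register_missing(int ufd, void *area, unsigned long len)
 *	{
 *		struct uffdio_register reg = {
 *			.range = {
 *				.start = (unsigned long)area,
 *				.len = len,
 *			},
 *			.mode = UFFDIO_REGISTER_MODE_MISSING,
 *		};
 *
 *		if (ioctl(ufd, UFFDIO_REGISTER, &reg))
 *			return -1;
 *		// reg.ioctls now holds the guaranteed ioctl bits, e.g.
 *		// 1ULL << _UFFDIO_COPY on anonymous memory.
 *		return 0;
 *	}
 */
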
149186039bd3SAndrea Arcangeli static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
149286039bd3SAndrea Arcangeli 				  unsigned long arg)
149386039bd3SAndrea Arcangeli {
149486039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
149586039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev, *cur;
149686039bd3SAndrea Arcangeli 	int ret;
149786039bd3SAndrea Arcangeli 	struct uffdio_range uffdio_unregister;
149886039bd3SAndrea Arcangeli 	unsigned long new_flags;
149986039bd3SAndrea Arcangeli 	bool found;
150086039bd3SAndrea Arcangeli 	unsigned long start, end, vma_end;
150186039bd3SAndrea Arcangeli 	const void __user *buf = (void __user *)arg;
150286039bd3SAndrea Arcangeli 
150386039bd3SAndrea Arcangeli 	ret = -EFAULT;
150486039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
150586039bd3SAndrea Arcangeli 		goto out;
150686039bd3SAndrea Arcangeli 
1507e71e2aceSPeter Collingbourne 	ret = validate_range(mm, uffdio_unregister.start,
150886039bd3SAndrea Arcangeli 			     uffdio_unregister.len);
150986039bd3SAndrea Arcangeli 	if (ret)
151086039bd3SAndrea Arcangeli 		goto out;
151186039bd3SAndrea Arcangeli 
151286039bd3SAndrea Arcangeli 	start = uffdio_unregister.start;
151386039bd3SAndrea Arcangeli 	end = start + uffdio_unregister.len;
151486039bd3SAndrea Arcangeli 
1515d2005e3fSOleg Nesterov 	ret = -ENOMEM;
1516d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
1517d2005e3fSOleg Nesterov 		goto out;
1518d2005e3fSOleg Nesterov 
1519d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
152086039bd3SAndrea Arcangeli 	vma = find_vma_prev(mm, start, &prev);
152186039bd3SAndrea Arcangeli 	if (!vma)
152286039bd3SAndrea Arcangeli 		goto out_unlock;
152386039bd3SAndrea Arcangeli 
152486039bd3SAndrea Arcangeli 	/* check that there's at least one vma in the range */
152586039bd3SAndrea Arcangeli 	ret = -EINVAL;
152686039bd3SAndrea Arcangeli 	if (vma->vm_start >= end)
152786039bd3SAndrea Arcangeli 		goto out_unlock;
152886039bd3SAndrea Arcangeli 
152986039bd3SAndrea Arcangeli 	/*
1530cab350afSMike Kravetz 	 * If the first vma contains huge pages, make sure start address
1531cab350afSMike Kravetz 	 * is aligned to huge page size.
1532cab350afSMike Kravetz 	 */
1533cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1534cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1535cab350afSMike Kravetz 
1536cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1537cab350afSMike Kravetz 			goto out_unlock;
1538cab350afSMike Kravetz 	}
1539cab350afSMike Kravetz 
1540cab350afSMike Kravetz 	/*
154186039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
154286039bd3SAndrea Arcangeli 	 */
154386039bd3SAndrea Arcangeli 	found = false;
154486039bd3SAndrea Arcangeli 	ret = -EINVAL;
154586039bd3SAndrea Arcangeli 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
154686039bd3SAndrea Arcangeli 		cond_resched();
154786039bd3SAndrea Arcangeli 
154886039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
15497677f7fdSAxel Rasmussen 		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
155086039bd3SAndrea Arcangeli 
155186039bd3SAndrea Arcangeli 		/*
155286039bd3SAndrea Arcangeli 		 * Check for incompatible vmas. Not strictly required
155386039bd3SAndrea Arcangeli 		 * here, as incompatible vmas cannot have a
155486039bd3SAndrea Arcangeli 		 * userfaultfd_ctx registered on them, but this
155586039bd3SAndrea Arcangeli 		 * provides stricter behavior so unregistration
155686039bd3SAndrea Arcangeli 		 * errors are noticed.
155786039bd3SAndrea Arcangeli 		 */
155863b2d417SAndrea Arcangeli 		if (!vma_can_userfault(cur, cur->vm_flags))
155986039bd3SAndrea Arcangeli 			goto out_unlock;
156086039bd3SAndrea Arcangeli 
156186039bd3SAndrea Arcangeli 		found = true;
156286039bd3SAndrea Arcangeli 	}
156386039bd3SAndrea Arcangeli 	BUG_ON(!found);
156486039bd3SAndrea Arcangeli 
156586039bd3SAndrea Arcangeli 	if (vma->vm_start < start)
156686039bd3SAndrea Arcangeli 		prev = vma;
156786039bd3SAndrea Arcangeli 
156886039bd3SAndrea Arcangeli 	ret = 0;
156986039bd3SAndrea Arcangeli 	do {
157086039bd3SAndrea Arcangeli 		cond_resched();
157186039bd3SAndrea Arcangeli 
157263b2d417SAndrea Arcangeli 		BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
157386039bd3SAndrea Arcangeli 
157486039bd3SAndrea Arcangeli 		/*
157586039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is not registered with any
157686039bd3SAndrea Arcangeli 		 * userfaultfd, so there is nothing to unregister.
157786039bd3SAndrea Arcangeli 		 */
157886039bd3SAndrea Arcangeli 		if (!vma->vm_userfaultfd_ctx.ctx)
157986039bd3SAndrea Arcangeli 			goto skip;
158086039bd3SAndrea Arcangeli 
158101e881f5SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
158201e881f5SAndrea Arcangeli 
158386039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
158486039bd3SAndrea Arcangeli 			start = vma->vm_start;
158586039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
158686039bd3SAndrea Arcangeli 
158709fa5296SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
158809fa5296SAndrea Arcangeli 			/*
158909fa5296SAndrea Arcangeli 			 * Wake any concurrent pending userfaults while
159009fa5296SAndrea Arcangeli 			 * we unregister, so they will not hang
159109fa5296SAndrea Arcangeli 			 * permanently and userland does not need to
159209fa5296SAndrea Arcangeli 			 * call UFFDIO_WAKE explicitly.
159309fa5296SAndrea Arcangeli 			 */
159409fa5296SAndrea Arcangeli 			struct userfaultfd_wake_range range;
159509fa5296SAndrea Arcangeli 			range.start = start;
159609fa5296SAndrea Arcangeli 			range.len = vma_end - start;
159709fa5296SAndrea Arcangeli 			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
159809fa5296SAndrea Arcangeli 		}
159909fa5296SAndrea Arcangeli 
1600f369b07cSPeter Xu 		/* Reset ptes for the whole vma range if wr-protected */
1601f369b07cSPeter Xu 		if (userfaultfd_wp(vma))
1602f369b07cSPeter Xu 			uffd_wp_range(mm, vma, start, vma_end - start, false);
1603f369b07cSPeter Xu 
16047677f7fdSAxel Rasmussen 		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
160586039bd3SAndrea Arcangeli 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
160686039bd3SAndrea Arcangeli 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
160786039bd3SAndrea Arcangeli 				 vma_policy(vma),
16085c26f6acSSuren Baghdasaryan 				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
160986039bd3SAndrea Arcangeli 		if (prev) {
161086039bd3SAndrea Arcangeli 			vma = prev;
161186039bd3SAndrea Arcangeli 			goto next;
161286039bd3SAndrea Arcangeli 		}
161386039bd3SAndrea Arcangeli 		if (vma->vm_start < start) {
161486039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, start, 1);
161586039bd3SAndrea Arcangeli 			if (ret)
161686039bd3SAndrea Arcangeli 				break;
161786039bd3SAndrea Arcangeli 		}
161886039bd3SAndrea Arcangeli 		if (vma->vm_end > end) {
161986039bd3SAndrea Arcangeli 			ret = split_vma(mm, vma, end, 0);
162086039bd3SAndrea Arcangeli 			if (ret)
162186039bd3SAndrea Arcangeli 				break;
162286039bd3SAndrea Arcangeli 		}
162386039bd3SAndrea Arcangeli 	next:
162486039bd3SAndrea Arcangeli 		/*
162586039bd3SAndrea Arcangeli 		 * In the vma_merge() successful mprotect-like case 8:
162686039bd3SAndrea Arcangeli 		 * the next vma was merged into the current one and
162786039bd3SAndrea Arcangeli 		 * the current one has not been updated yet.
162886039bd3SAndrea Arcangeli 		 */
162986039bd3SAndrea Arcangeli 		vma->vm_flags = new_flags;
163086039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
163186039bd3SAndrea Arcangeli 
163286039bd3SAndrea Arcangeli 	skip:
163386039bd3SAndrea Arcangeli 		prev = vma;
163486039bd3SAndrea Arcangeli 		start = vma->vm_end;
163586039bd3SAndrea Arcangeli 		vma = vma->vm_next;
163686039bd3SAndrea Arcangeli 	} while (vma && vma->vm_start < end);
163786039bd3SAndrea Arcangeli out_unlock:
1638d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1639d2005e3fSOleg Nesterov 	mmput(mm);
164086039bd3SAndrea Arcangeli out:
164186039bd3SAndrea Arcangeli 	return ret;
164286039bd3SAndrea Arcangeli }
164386039bd3SAndrea Arcangeli 
164486039bd3SAndrea Arcangeli /*
1645ba85c702SAndrea Arcangeli  * userfaultfd_wake may be used in combination with the
1646ba85c702SAndrea Arcangeli  * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches.
164786039bd3SAndrea Arcangeli  */
164886039bd3SAndrea Arcangeli static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
164986039bd3SAndrea Arcangeli 			    unsigned long arg)
165086039bd3SAndrea Arcangeli {
165186039bd3SAndrea Arcangeli 	int ret;
165286039bd3SAndrea Arcangeli 	struct uffdio_range uffdio_wake;
165386039bd3SAndrea Arcangeli 	struct userfaultfd_wake_range range;
165486039bd3SAndrea Arcangeli 	const void __user *buf = (void __user *)arg;
165586039bd3SAndrea Arcangeli 
165686039bd3SAndrea Arcangeli 	ret = -EFAULT;
165786039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
165886039bd3SAndrea Arcangeli 		goto out;
165986039bd3SAndrea Arcangeli 
1660e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
166186039bd3SAndrea Arcangeli 	if (ret)
166286039bd3SAndrea Arcangeli 		goto out;
166386039bd3SAndrea Arcangeli 
166486039bd3SAndrea Arcangeli 	range.start = uffdio_wake.start;
166586039bd3SAndrea Arcangeli 	range.len = uffdio_wake.len;
166686039bd3SAndrea Arcangeli 
166786039bd3SAndrea Arcangeli 	/*
166886039bd3SAndrea Arcangeli 	 * len == 0 means wake all and we don't want to wake all here,
166986039bd3SAndrea Arcangeli 	 * so check it again to be sure.
167086039bd3SAndrea Arcangeli 	 */
167186039bd3SAndrea Arcangeli 	VM_BUG_ON(!range.len);
167286039bd3SAndrea Arcangeli 
167386039bd3SAndrea Arcangeli 	wake_userfault(ctx, &range);
167486039bd3SAndrea Arcangeli 	ret = 0;
167586039bd3SAndrea Arcangeli 
167686039bd3SAndrea Arcangeli out:
167786039bd3SAndrea Arcangeli 	return ret;
167886039bd3SAndrea Arcangeli }
167986039bd3SAndrea Arcangeli 
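/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * batching described above: after resolving several faults with the
 * UFFDIO_*_MODE_DONTWAKE flags set, a single UFFDIO_WAKE covers the
 * whole span. "start" and "len" are assumed to span all resolved pages.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int wake_range(int ufd, unsigned long start, unsigned long len)
 *	{
 *		struct uffdio_range range = {
 *			.start = start,
 *			.len = len,	// must be non-zero and page aligned
 *		};
 *
 *		return ioctl(ufd, UFFDIO_WAKE, &range);
 *	}
 */
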
1680ad465caeSAndrea Arcangeli static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1681ad465caeSAndrea Arcangeli 			    unsigned long arg)
1682ad465caeSAndrea Arcangeli {
1683ad465caeSAndrea Arcangeli 	__s64 ret;
1684ad465caeSAndrea Arcangeli 	struct uffdio_copy uffdio_copy;
1685ad465caeSAndrea Arcangeli 	struct uffdio_copy __user *user_uffdio_copy;
1686ad465caeSAndrea Arcangeli 	struct userfaultfd_wake_range range;
1687ad465caeSAndrea Arcangeli 
1688ad465caeSAndrea Arcangeli 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
1689ad465caeSAndrea Arcangeli 
1690df2cc96eSMike Rapoport 	ret = -EAGAIN;
1691a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
1692df2cc96eSMike Rapoport 		goto out;
1693df2cc96eSMike Rapoport 
1694ad465caeSAndrea Arcangeli 	ret = -EFAULT;
1695ad465caeSAndrea Arcangeli 	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1696ad465caeSAndrea Arcangeli 			   /* don't copy the last field, "copy" */
1697ad465caeSAndrea Arcangeli 			   sizeof(uffdio_copy)-sizeof(__s64)))
1698ad465caeSAndrea Arcangeli 		goto out;
1699ad465caeSAndrea Arcangeli 
1700e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1701ad465caeSAndrea Arcangeli 	if (ret)
1702ad465caeSAndrea Arcangeli 		goto out;
1703ad465caeSAndrea Arcangeli 	/*
1704ad465caeSAndrea Arcangeli 	 * double check for wraparound just in case. copy_from_user()
1705ad465caeSAndrea Arcangeli 	 * will later check that uffdio_copy.src + uffdio_copy.len fits
1706ad465caeSAndrea Arcangeli 	 * in the userland range.
1707ad465caeSAndrea Arcangeli 	 */
1708ad465caeSAndrea Arcangeli 	ret = -EINVAL;
1709ad465caeSAndrea Arcangeli 	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
1710ad465caeSAndrea Arcangeli 		goto out;
171172981e0eSAndrea Arcangeli 	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
1712ad465caeSAndrea Arcangeli 		goto out;
1713d2005e3fSOleg Nesterov 	if (mmget_not_zero(ctx->mm)) {
1714ad465caeSAndrea Arcangeli 		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
171572981e0eSAndrea Arcangeli 				   uffdio_copy.len, &ctx->mmap_changing,
171672981e0eSAndrea Arcangeli 				   uffdio_copy.mode);
1717d2005e3fSOleg Nesterov 		mmput(ctx->mm);
171896333187SMike Rapoport 	} else {
1719e86b298bSMike Rapoport 		return -ESRCH;
1720d2005e3fSOleg Nesterov 	}
1721ad465caeSAndrea Arcangeli 	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1722ad465caeSAndrea Arcangeli 		return -EFAULT;
1723ad465caeSAndrea Arcangeli 	if (ret < 0)
1724ad465caeSAndrea Arcangeli 		goto out;
1725ad465caeSAndrea Arcangeli 	BUG_ON(!ret);
1726ad465caeSAndrea Arcangeli 	/* len == 0 would wake all */
1727ad465caeSAndrea Arcangeli 	range.len = ret;
1728ad465caeSAndrea Arcangeli 	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1729ad465caeSAndrea Arcangeli 		range.start = uffdio_copy.dst;
1730ad465caeSAndrea Arcangeli 		wake_userfault(ctx, &range);
1731ad465caeSAndrea Arcangeli 	}
1732ad465caeSAndrea Arcangeli 	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1733ad465caeSAndrea Arcangeli out:
1734ad465caeSAndrea Arcangeli 	return ret;
1735ad465caeSAndrea Arcangeli }
1736ad465caeSAndrea Arcangeli 
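/*
 * Illustrative userspace sketch (not part of the kernel build) of how a
 * fault-handling thread would typically drive the UFFDIO_COPY path
 * above. "page_size" and the source buffer are assumptions of the
 * example.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int resolve_fault(int ufd, unsigned long fault_addr,
 *				 void *src, unsigned long page_size)
 *	{
 *		struct uffdio_copy copy = {
 *			.dst = fault_addr & ~(page_size - 1),
 *			.src = (unsigned long)src,
 *			.len = page_size,
 *			.mode = 0,	// wake the faulting thread immediately
 *		};
 *
 *		if (ioctl(ufd, UFFDIO_COPY, &copy))
 *			return -1;	// copy.copy holds -errno or bytes copied
 *		return 0;
 *	}
 */
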
1737ad465caeSAndrea Arcangeli static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1738ad465caeSAndrea Arcangeli 				unsigned long arg)
1739ad465caeSAndrea Arcangeli {
1740ad465caeSAndrea Arcangeli 	__s64 ret;
1741ad465caeSAndrea Arcangeli 	struct uffdio_zeropage uffdio_zeropage;
1742ad465caeSAndrea Arcangeli 	struct uffdio_zeropage __user *user_uffdio_zeropage;
1743ad465caeSAndrea Arcangeli 	struct userfaultfd_wake_range range;
1744ad465caeSAndrea Arcangeli 
1745ad465caeSAndrea Arcangeli 	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1746ad465caeSAndrea Arcangeli 
1747df2cc96eSMike Rapoport 	ret = -EAGAIN;
1748a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
1749df2cc96eSMike Rapoport 		goto out;
1750df2cc96eSMike Rapoport 
1751ad465caeSAndrea Arcangeli 	ret = -EFAULT;
1752ad465caeSAndrea Arcangeli 	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1753ad465caeSAndrea Arcangeli 			   /* don't copy the last field, "zeropage" */
1754ad465caeSAndrea Arcangeli 			   sizeof(uffdio_zeropage)-sizeof(__s64)))
1755ad465caeSAndrea Arcangeli 		goto out;
1756ad465caeSAndrea Arcangeli 
1757e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1758ad465caeSAndrea Arcangeli 			     uffdio_zeropage.range.len);
1759ad465caeSAndrea Arcangeli 	if (ret)
1760ad465caeSAndrea Arcangeli 		goto out;
1761ad465caeSAndrea Arcangeli 	ret = -EINVAL;
1762ad465caeSAndrea Arcangeli 	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1763ad465caeSAndrea Arcangeli 		goto out;
1764ad465caeSAndrea Arcangeli 
1765d2005e3fSOleg Nesterov 	if (mmget_not_zero(ctx->mm)) {
1766ad465caeSAndrea Arcangeli 		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
1767df2cc96eSMike Rapoport 				     uffdio_zeropage.range.len,
1768df2cc96eSMike Rapoport 				     &ctx->mmap_changing);
1769d2005e3fSOleg Nesterov 		mmput(ctx->mm);
17709d95aa4bSMike Rapoport 	} else {
1771e86b298bSMike Rapoport 		return -ESRCH;
1772d2005e3fSOleg Nesterov 	}
1773ad465caeSAndrea Arcangeli 	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1774ad465caeSAndrea Arcangeli 		return -EFAULT;
1775ad465caeSAndrea Arcangeli 	if (ret < 0)
1776ad465caeSAndrea Arcangeli 		goto out;
1777ad465caeSAndrea Arcangeli 	/* len == 0 would wake all */
1778ad465caeSAndrea Arcangeli 	BUG_ON(!ret);
1779ad465caeSAndrea Arcangeli 	range.len = ret;
1780ad465caeSAndrea Arcangeli 	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1781ad465caeSAndrea Arcangeli 		range.start = uffdio_zeropage.range.start;
1782ad465caeSAndrea Arcangeli 		wake_userfault(ctx, &range);
1783ad465caeSAndrea Arcangeli 	}
1784ad465caeSAndrea Arcangeli 	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1785ad465caeSAndrea Arcangeli out:
1786ad465caeSAndrea Arcangeli 	return ret;
1787ad465caeSAndrea Arcangeli }
1788ad465caeSAndrea Arcangeli 
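/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * UFFDIO_ZEROPAGE path above, which resolves a fault with zero-filled
 * memory instead of copying data in.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int resolve_with_zeropage(int ufd, unsigned long start,
 *					 unsigned long page_size)
 *	{
 *		struct uffdio_zeropage zp = {
 *			.range = { .start = start, .len = page_size },
 *			.mode = 0,
 *		};
 *
 *		if (ioctl(ufd, UFFDIO_ZEROPAGE, &zp))
 *			return -1;	// zp.zeropage holds -errno or bytes zeroed
 *		return 0;
 *	}
 */
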
178963b2d417SAndrea Arcangeli static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
179063b2d417SAndrea Arcangeli 				    unsigned long arg)
179163b2d417SAndrea Arcangeli {
179263b2d417SAndrea Arcangeli 	int ret;
179363b2d417SAndrea Arcangeli 	struct uffdio_writeprotect uffdio_wp;
179463b2d417SAndrea Arcangeli 	struct uffdio_writeprotect __user *user_uffdio_wp;
179563b2d417SAndrea Arcangeli 	struct userfaultfd_wake_range range;
179623080e27SPeter Xu 	bool mode_wp, mode_dontwake;
179763b2d417SAndrea Arcangeli 
1798a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
179963b2d417SAndrea Arcangeli 		return -EAGAIN;
180063b2d417SAndrea Arcangeli 
180163b2d417SAndrea Arcangeli 	user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
180263b2d417SAndrea Arcangeli 
180363b2d417SAndrea Arcangeli 	if (copy_from_user(&uffdio_wp, user_uffdio_wp,
180463b2d417SAndrea Arcangeli 			   sizeof(struct uffdio_writeprotect)))
180563b2d417SAndrea Arcangeli 		return -EFAULT;
180663b2d417SAndrea Arcangeli 
1807e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_wp.range.start,
180863b2d417SAndrea Arcangeli 			     uffdio_wp.range.len);
180963b2d417SAndrea Arcangeli 	if (ret)
181063b2d417SAndrea Arcangeli 		return ret;
181163b2d417SAndrea Arcangeli 
181263b2d417SAndrea Arcangeli 	if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
181363b2d417SAndrea Arcangeli 			       UFFDIO_WRITEPROTECT_MODE_WP))
181463b2d417SAndrea Arcangeli 		return -EINVAL;
181523080e27SPeter Xu 
181623080e27SPeter Xu 	mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
181723080e27SPeter Xu 	mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
181823080e27SPeter Xu 
181923080e27SPeter Xu 	if (mode_wp && mode_dontwake)
182063b2d417SAndrea Arcangeli 		return -EINVAL;
182163b2d417SAndrea Arcangeli 
1822cb185d5fSNadav Amit 	if (mmget_not_zero(ctx->mm)) {
182363b2d417SAndrea Arcangeli 		ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
182423080e27SPeter Xu 					  uffdio_wp.range.len, mode_wp,
182563b2d417SAndrea Arcangeli 					  &ctx->mmap_changing);
1826cb185d5fSNadav Amit 		mmput(ctx->mm);
1827cb185d5fSNadav Amit 	} else {
1828cb185d5fSNadav Amit 		return -ESRCH;
1829cb185d5fSNadav Amit 	}
1830cb185d5fSNadav Amit 
183163b2d417SAndrea Arcangeli 	if (ret)
183263b2d417SAndrea Arcangeli 		return ret;
183363b2d417SAndrea Arcangeli 
183423080e27SPeter Xu 	if (!mode_wp && !mode_dontwake) {
183563b2d417SAndrea Arcangeli 		range.start = uffdio_wp.range.start;
183663b2d417SAndrea Arcangeli 		range.len = uffdio_wp.range.len;
183763b2d417SAndrea Arcangeli 		wake_userfault(ctx, &range);
183863b2d417SAndrea Arcangeli 	}
183963b2d417SAndrea Arcangeli 	return ret;
184063b2d417SAndrea Arcangeli }
184163b2d417SAndrea Arcangeli 
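
/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * two UFFDIO_WRITEPROTECT calls handled above: mode_wp set arms write
 * protection, mode_wp clear removes it and (without DONTWAKE) wakes the
 * blocked writers.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int set_wp(int ufd, unsigned long start, unsigned long len,
 *			  int protect)
 *	{
 *		struct uffdio_writeprotect wp = {
 *			.range = { .start = start, .len = len },
 *			.mode = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
 *		};
 *
 *		return ioctl(ufd, UFFDIO_WRITEPROTECT, &wp);
 *	}
 */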
1842f6191471SAxel Rasmussen static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1843f6191471SAxel Rasmussen {
1844f6191471SAxel Rasmussen 	__s64 ret;
1845f6191471SAxel Rasmussen 	struct uffdio_continue uffdio_continue;
1846f6191471SAxel Rasmussen 	struct uffdio_continue __user *user_uffdio_continue;
1847f6191471SAxel Rasmussen 	struct userfaultfd_wake_range range;
1848f6191471SAxel Rasmussen 
1849f6191471SAxel Rasmussen 	user_uffdio_continue = (struct uffdio_continue __user *)arg;
1850f6191471SAxel Rasmussen 
1851f6191471SAxel Rasmussen 	ret = -EAGAIN;
1852a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
1853f6191471SAxel Rasmussen 		goto out;
1854f6191471SAxel Rasmussen 
1855f6191471SAxel Rasmussen 	ret = -EFAULT;
1856f6191471SAxel Rasmussen 	if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1857f6191471SAxel Rasmussen 			   /* don't copy the output fields */
1858f6191471SAxel Rasmussen 			   sizeof(uffdio_continue) - (sizeof(__s64))))
1859f6191471SAxel Rasmussen 		goto out;
1860f6191471SAxel Rasmussen 
1861e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_continue.range.start,
1862f6191471SAxel Rasmussen 			     uffdio_continue.range.len);
1863f6191471SAxel Rasmussen 	if (ret)
1864f6191471SAxel Rasmussen 		goto out;
1865f6191471SAxel Rasmussen 
1866f6191471SAxel Rasmussen 	ret = -EINVAL;
1867f6191471SAxel Rasmussen 	/* double check for wraparound just in case. */
1868f6191471SAxel Rasmussen 	if (uffdio_continue.range.start + uffdio_continue.range.len <=
1869f6191471SAxel Rasmussen 	    uffdio_continue.range.start) {
1870f6191471SAxel Rasmussen 		goto out;
1871f6191471SAxel Rasmussen 	}
1872f6191471SAxel Rasmussen 	if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE)
1873f6191471SAxel Rasmussen 		goto out;
1874f6191471SAxel Rasmussen 
1875f6191471SAxel Rasmussen 	if (mmget_not_zero(ctx->mm)) {
1876f6191471SAxel Rasmussen 		ret = mcopy_continue(ctx->mm, uffdio_continue.range.start,
1877f6191471SAxel Rasmussen 				     uffdio_continue.range.len,
1878f6191471SAxel Rasmussen 				     &ctx->mmap_changing);
1879f6191471SAxel Rasmussen 		mmput(ctx->mm);
1880f6191471SAxel Rasmussen 	} else {
1881f6191471SAxel Rasmussen 		return -ESRCH;
1882f6191471SAxel Rasmussen 	}
1883f6191471SAxel Rasmussen 
1884f6191471SAxel Rasmussen 	if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1885f6191471SAxel Rasmussen 		return -EFAULT;
1886f6191471SAxel Rasmussen 	if (ret < 0)
1887f6191471SAxel Rasmussen 		goto out;
1888f6191471SAxel Rasmussen 
1889f6191471SAxel Rasmussen 	/* len == 0 would wake all */
1890f6191471SAxel Rasmussen 	BUG_ON(!ret);
1891f6191471SAxel Rasmussen 	range.len = ret;
1892f6191471SAxel Rasmussen 	if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1893f6191471SAxel Rasmussen 		range.start = uffdio_continue.range.start;
1894f6191471SAxel Rasmussen 		wake_userfault(ctx, &range);
1895f6191471SAxel Rasmussen 	}
1896f6191471SAxel Rasmussen 	ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1897f6191471SAxel Rasmussen 
1898f6191471SAxel Rasmussen out:
1899f6191471SAxel Rasmussen 	return ret;
1900f6191471SAxel Rasmussen }
1901f6191471SAxel Rasmussen 
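/*
 * Illustrative userspace sketch (not part of the kernel build) of
 * resolving a minor fault through the UFFDIO_CONTINUE path above: the
 * page contents already exist in the page cache, so the ioctl only
 * installs the page table entries.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int resolve_minor(int ufd, unsigned long start, unsigned long len)
 *	{
 *		struct uffdio_continue cont = {
 *			.range = { .start = start, .len = len },
 *			.mode = 0,	// wake on completion
 *		};
 *
 *		if (ioctl(ufd, UFFDIO_CONTINUE, &cont))
 *			return -1;	// cont.mapped holds -errno or bytes mapped
 *		return 0;
 *	}
 */
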
19029cd75c3cSPavel Emelyanov static inline unsigned int uffd_ctx_features(__u64 user_features)
19039cd75c3cSPavel Emelyanov {
19049cd75c3cSPavel Emelyanov 	/*
190522e5fe2aSNadav Amit 	 * For the current set of features the bits just coincide. Set
190622e5fe2aSNadav Amit 	 * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
19079cd75c3cSPavel Emelyanov 	 */
190822e5fe2aSNadav Amit 	return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
19099cd75c3cSPavel Emelyanov }
19109cd75c3cSPavel Emelyanov 
191186039bd3SAndrea Arcangeli /*
191286039bd3SAndrea Arcangeli  * userland asks for a certain API version and we return which bits
191386039bd3SAndrea Arcangeli  * and ioctl commands are implemented in this kernel for that API
191486039bd3SAndrea Arcangeli  * version, or -EINVAL if unknown.
191586039bd3SAndrea Arcangeli  */
191686039bd3SAndrea Arcangeli static int userfaultfd_api(struct userfaultfd_ctx *ctx,
191786039bd3SAndrea Arcangeli 			   unsigned long arg)
191886039bd3SAndrea Arcangeli {
191986039bd3SAndrea Arcangeli 	struct uffdio_api uffdio_api;
192086039bd3SAndrea Arcangeli 	void __user *buf = (void __user *)arg;
192122e5fe2aSNadav Amit 	unsigned int ctx_features;
192286039bd3SAndrea Arcangeli 	int ret;
192365603144SAndrea Arcangeli 	__u64 features;
192486039bd3SAndrea Arcangeli 
192586039bd3SAndrea Arcangeli 	ret = -EFAULT;
1926a9b85f94SAndrea Arcangeli 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
192786039bd3SAndrea Arcangeli 		goto out;
1928914eedcbSAxel Rasmussen 	/* Ignore unsupported features (userspace built against newer kernel) */
1929914eedcbSAxel Rasmussen 	features = uffdio_api.features & UFFD_API_FEATURES;
19303c1c24d9SMike Rapoport 	ret = -EPERM;
19313c1c24d9SMike Rapoport 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
19323c1c24d9SMike Rapoport 		goto err_out;
193365603144SAndrea Arcangeli 	/* report all available features and ioctls to userland */
193465603144SAndrea Arcangeli 	uffdio_api.features = UFFD_API_FEATURES;
19357677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1936964ab004SAxel Rasmussen 	uffdio_api.features &=
1937964ab004SAxel Rasmussen 		~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
19387677f7fdSAxel Rasmussen #endif
193900b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
194000b151f2SPeter Xu 	uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
194186039bd3SAndrea Arcangeli #endif
1942b1f9e876SPeter Xu #ifndef CONFIG_PTE_MARKER_UFFD_WP
1943b1f9e876SPeter Xu 	uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
1944b1f9e876SPeter Xu #endif
194586039bd3SAndrea Arcangeli 	uffdio_api.ioctls = UFFD_API_IOCTLS;
194686039bd3SAndrea Arcangeli 	ret = -EFAULT;
194786039bd3SAndrea Arcangeli 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
194886039bd3SAndrea Arcangeli 		goto out;
194922e5fe2aSNadav Amit 
195086039bd3SAndrea Arcangeli 	/* only enable the requested features for this uffd context */
195122e5fe2aSNadav Amit 	ctx_features = uffd_ctx_features(features);
195222e5fe2aSNadav Amit 	ret = -EINVAL;
195322e5fe2aSNadav Amit 	if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
195422e5fe2aSNadav Amit 		goto err_out;
195522e5fe2aSNadav Amit 
195686039bd3SAndrea Arcangeli 	ret = 0;
195786039bd3SAndrea Arcangeli out:
195886039bd3SAndrea Arcangeli 	return ret;
195986039bd3SAndrea Arcangeli err_out:
196086039bd3SAndrea Arcangeli 	memset(&uffdio_api, 0, sizeof(uffdio_api));
196186039bd3SAndrea Arcangeli 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
196286039bd3SAndrea Arcangeli 		ret = -EFAULT;
196386039bd3SAndrea Arcangeli 	goto out;
196486039bd3SAndrea Arcangeli }
196586039bd3SAndrea Arcangeli 
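/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * UFFDIO_API handshake implemented above; it must be the first ioctl
 * issued on a new userfaultfd, and requesting no features is always
 * valid.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int uffd_handshake(int ufd)
 *	{
 *		struct uffdio_api api = {
 *			.api = UFFD_API,
 *			.features = 0,	// or a subset reported by a prior probe
 *		};
 *
 *		if (ioctl(ufd, UFFDIO_API, &api))
 *			return -1;
 *		// api.features and api.ioctls now report kernel support.
 *		return 0;
 *	}
 */
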
196686039bd3SAndrea Arcangeli static long userfaultfd_ioctl(struct file *file, unsigned cmd,
1967e6485a47SAndrea Arcangeli 			      unsigned long arg)
1968e6485a47SAndrea Arcangeli {
1969e6485a47SAndrea Arcangeli 	int ret = -EINVAL;
197086039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
197186039bd3SAndrea Arcangeli 
197222e5fe2aSNadav Amit 	if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
197386039bd3SAndrea Arcangeli 		return -EINVAL;
197486039bd3SAndrea Arcangeli 
197586039bd3SAndrea Arcangeli 	switch (cmd) {
197686039bd3SAndrea Arcangeli 	case UFFDIO_API:
197786039bd3SAndrea Arcangeli 		ret = userfaultfd_api(ctx, arg);
197886039bd3SAndrea Arcangeli 		break;
197986039bd3SAndrea Arcangeli 	case UFFDIO_REGISTER:
198086039bd3SAndrea Arcangeli 		ret = userfaultfd_register(ctx, arg);
198186039bd3SAndrea Arcangeli 		break;
198286039bd3SAndrea Arcangeli 	case UFFDIO_UNREGISTER:
198386039bd3SAndrea Arcangeli 		ret = userfaultfd_unregister(ctx, arg);
198486039bd3SAndrea Arcangeli 		break;
198586039bd3SAndrea Arcangeli 	case UFFDIO_WAKE:
1986ad465caeSAndrea Arcangeli 		ret = userfaultfd_wake(ctx, arg);
1987ad465caeSAndrea Arcangeli 		break;
1988ad465caeSAndrea Arcangeli 	case UFFDIO_COPY:
1989ad465caeSAndrea Arcangeli 		ret = userfaultfd_copy(ctx, arg);
1990ad465caeSAndrea Arcangeli 		break;
1991ad465caeSAndrea Arcangeli 	case UFFDIO_ZEROPAGE:
199286039bd3SAndrea Arcangeli 		ret = userfaultfd_zeropage(ctx, arg);
199386039bd3SAndrea Arcangeli 		break;
199463b2d417SAndrea Arcangeli 	case UFFDIO_WRITEPROTECT:
199563b2d417SAndrea Arcangeli 		ret = userfaultfd_writeprotect(ctx, arg);
199663b2d417SAndrea Arcangeli 		break;
1997f6191471SAxel Rasmussen 	case UFFDIO_CONTINUE:
1998f6191471SAxel Rasmussen 		ret = userfaultfd_continue(ctx, arg);
1999f6191471SAxel Rasmussen 		break;
200086039bd3SAndrea Arcangeli 	}
200186039bd3SAndrea Arcangeli 	return ret;
200286039bd3SAndrea Arcangeli }
200386039bd3SAndrea Arcangeli 
200486039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS
200586039bd3SAndrea Arcangeli static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
200686039bd3SAndrea Arcangeli {
200786039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = f->private_data;
2008ac6424b9SIngo Molnar 	wait_queue_entry_t *wq;
200986039bd3SAndrea Arcangeli 	unsigned long pending = 0, total = 0;
201086039bd3SAndrea Arcangeli 
2011cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
20122055da97SIngo Molnar 	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
201386039bd3SAndrea Arcangeli 		pending++;
201486039bd3SAndrea Arcangeli 		total++;
201586039bd3SAndrea Arcangeli 	}
20162055da97SIngo Molnar 	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
201715b726efSAndrea Arcangeli 		total++;
201815b726efSAndrea Arcangeli 	}
2019cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
202086039bd3SAndrea Arcangeli 
202186039bd3SAndrea Arcangeli 	/*
202286039bd3SAndrea Arcangeli 	 * If more protocols are added, they will all be shown
202386039bd3SAndrea Arcangeli 	 * separated by a space, like this:
202486039bd3SAndrea Arcangeli 	 *	protocols: aa:... bb:...
202586039bd3SAndrea Arcangeli 	 */
202686039bd3SAndrea Arcangeli 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
2027045098e9SMike Rapoport 		   pending, total, UFFD_API, ctx->features,
202886039bd3SAndrea Arcangeli 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
202986039bd3SAndrea Arcangeli }
203086039bd3SAndrea Arcangeli #endif
203186039bd3SAndrea Arcangeli 
203286039bd3SAndrea Arcangeli static const struct file_operations userfaultfd_fops = {
203386039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS
203486039bd3SAndrea Arcangeli 	.show_fdinfo	= userfaultfd_show_fdinfo,
203586039bd3SAndrea Arcangeli #endif
203686039bd3SAndrea Arcangeli 	.release	= userfaultfd_release,
203786039bd3SAndrea Arcangeli 	.poll		= userfaultfd_poll,
203886039bd3SAndrea Arcangeli 	.read		= userfaultfd_read,
203986039bd3SAndrea Arcangeli 	.unlocked_ioctl = userfaultfd_ioctl,
20401832f2d8SArnd Bergmann 	.compat_ioctl	= compat_ptr_ioctl,
204186039bd3SAndrea Arcangeli 	.llseek		= noop_llseek,
204286039bd3SAndrea Arcangeli };
204386039bd3SAndrea Arcangeli 
20443004ec9cSAndrea Arcangeli static void init_once_userfaultfd_ctx(void *mem)
20453004ec9cSAndrea Arcangeli {
20463004ec9cSAndrea Arcangeli 	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
20473004ec9cSAndrea Arcangeli 
20483004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fault_pending_wqh);
20493004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fault_wqh);
20509cd75c3cSPavel Emelyanov 	init_waitqueue_head(&ctx->event_wqh);
20513004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fd_wqh);
20522ca97ac8SAhmed S. Darwish 	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
20533004ec9cSAndrea Arcangeli }
20543004ec9cSAndrea Arcangeli 
2055*2d5de004SAxel Rasmussen static int new_userfaultfd(int flags)
205686039bd3SAndrea Arcangeli {
205786039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx;
2058284cd241SEric Biggers 	int fd;
205986039bd3SAndrea Arcangeli 
206086039bd3SAndrea Arcangeli 	BUG_ON(!current->mm);
206186039bd3SAndrea Arcangeli 
206286039bd3SAndrea Arcangeli 	/* Check the UFFD_* constants for consistency.  */
206337cd0575SLokesh Gidra 	BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
206486039bd3SAndrea Arcangeli 	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
206586039bd3SAndrea Arcangeli 	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
206686039bd3SAndrea Arcangeli 
206737cd0575SLokesh Gidra 	if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
2068284cd241SEric Biggers 		return -EINVAL;
206986039bd3SAndrea Arcangeli 
20703004ec9cSAndrea Arcangeli 	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
207186039bd3SAndrea Arcangeli 	if (!ctx)
2072284cd241SEric Biggers 		return -ENOMEM;
207386039bd3SAndrea Arcangeli 
2074ca880420SEric Biggers 	refcount_set(&ctx->refcount, 1);
207586039bd3SAndrea Arcangeli 	ctx->flags = flags;
20769cd75c3cSPavel Emelyanov 	ctx->features = 0;
207786039bd3SAndrea Arcangeli 	ctx->released = false;
2078a759a909SNadav Amit 	atomic_set(&ctx->mmap_changing, 0);
207986039bd3SAndrea Arcangeli 	ctx->mm = current->mm;
208086039bd3SAndrea Arcangeli 	/* prevent the mm struct from being freed */
2081f1f10076SVegard Nossum 	mmgrab(ctx->mm);
208286039bd3SAndrea Arcangeli 
2083b537900fSDaniel Colascione 	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
2084b537900fSDaniel Colascione 			O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
2085284cd241SEric Biggers 	if (fd < 0) {
2086d2005e3fSOleg Nesterov 		mmdrop(ctx->mm);
20873004ec9cSAndrea Arcangeli 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
2088c03e946fSEric Biggers 	}
208986039bd3SAndrea Arcangeli 	return fd;
209086039bd3SAndrea Arcangeli }
20913004ec9cSAndrea Arcangeli 
2092*2d5de004SAxel Rasmussen static inline bool userfaultfd_syscall_allowed(int flags)
2093*2d5de004SAxel Rasmussen {
2094*2d5de004SAxel Rasmussen 	/* Userspace-only page faults are always allowed */
2095*2d5de004SAxel Rasmussen 	if (flags & UFFD_USER_MODE_ONLY)
2096*2d5de004SAxel Rasmussen 		return true;
2097*2d5de004SAxel Rasmussen 
2098*2d5de004SAxel Rasmussen 	/*
2099*2d5de004SAxel Rasmussen 	 * The user is requesting a userfaultfd which can handle kernel faults.
2100*2d5de004SAxel Rasmussen 	 * Privileged users are always allowed to do this.
2101*2d5de004SAxel Rasmussen 	 */
2102*2d5de004SAxel Rasmussen 	if (capable(CAP_SYS_PTRACE))
2103*2d5de004SAxel Rasmussen 		return true;
2104*2d5de004SAxel Rasmussen 
2105*2d5de004SAxel Rasmussen 	/* Otherwise, access to kernel fault handling is sysctl controlled. */
2106*2d5de004SAxel Rasmussen 	return sysctl_unprivileged_userfaultfd;
2107*2d5de004SAxel Rasmussen }
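
/*
 * Illustrative sketch (not part of this file): the sysctl consulted above
 * is exposed as vm.unprivileged_userfaultfd, so an administrator can let
 * unprivileged tasks handle kernel faults with, e.g.:
 *
 *	sysctl -w vm.unprivileged_userfaultfd=1
 *
 * or by writing "1" to /proc/sys/vm/unprivileged_userfaultfd.
 */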
2108*2d5de004SAxel Rasmussen 
2109*2d5de004SAxel Rasmussen SYSCALL_DEFINE1(userfaultfd, int, flags)
2110*2d5de004SAxel Rasmussen {
2111*2d5de004SAxel Rasmussen 	if (!userfaultfd_syscall_allowed(flags))
2112*2d5de004SAxel Rasmussen 		return -EPERM;
2113*2d5de004SAxel Rasmussen 
2114*2d5de004SAxel Rasmussen 	return new_userfaultfd(flags);
2115*2d5de004SAxel Rasmussen }
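
/*
 * Illustrative sketch (not part of this file): libc ships no wrapper, so
 * userspace typically reaches the syscall above via syscall(2) and then
 * performs the UFFDIO_API handshake before registering any ranges:
 *
 *	int uffd = syscall(__NR_userfaultfd,
 *			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *	struct uffdio_api api = { .api = UFFD_API };
 *
 *	if (uffd >= 0 && ioctl(uffd, UFFDIO_API, &api) == 0)
 *		...	(uffd is now ready for UFFDIO_REGISTER)
 *
 * UFFD_USER_MODE_ONLY is included so the example also passes the
 * permission check above when the sysctl is 0 and the caller lacks
 * CAP_SYS_PTRACE.
 */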
2116*2d5de004SAxel Rasmussen 
2117*2d5de004SAxel Rasmussen static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
2118*2d5de004SAxel Rasmussen {
2119*2d5de004SAxel Rasmussen 	if (cmd != USERFAULTFD_IOC_NEW)
2120*2d5de004SAxel Rasmussen 		return -EINVAL;
2121*2d5de004SAxel Rasmussen 
2122*2d5de004SAxel Rasmussen 	return new_userfaultfd(flags);
2123*2d5de004SAxel Rasmussen }
2124*2d5de004SAxel Rasmussen 
2125*2d5de004SAxel Rasmussen static const struct file_operations userfaultfd_dev_fops = {
2126*2d5de004SAxel Rasmussen 	.unlocked_ioctl = userfaultfd_dev_ioctl,
2127*2d5de004SAxel Rasmussen 	.compat_ioctl = userfaultfd_dev_ioctl,
2128*2d5de004SAxel Rasmussen 	.owner = THIS_MODULE,
2129*2d5de004SAxel Rasmussen 	.llseek = noop_llseek,
2130*2d5de004SAxel Rasmussen };
2131*2d5de004SAxel Rasmussen 
2132*2d5de004SAxel Rasmussen static struct miscdevice userfaultfd_misc = {
2133*2d5de004SAxel Rasmussen 	.minor = MISC_DYNAMIC_MINOR,
2134*2d5de004SAxel Rasmussen 	.name = "userfaultfd",
2135*2d5de004SAxel Rasmussen 	.fops = &userfaultfd_dev_fops
2136*2d5de004SAxel Rasmussen };
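
/*
 * Illustrative sketch (not part of this file): the miscdevice above exposes
 * /dev/userfaultfd as an alternative creation path; any task that can open
 * the device node can mint userfaultfds, so access control reduces to file
 * permissions on the node (error handling omitted):
 *
 *	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
 *	int uffd = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);
 */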
2137*2d5de004SAxel Rasmussen 
21383004ec9cSAndrea Arcangeli static int __init userfaultfd_init(void)
21393004ec9cSAndrea Arcangeli {
2140*2d5de004SAxel Rasmussen 	int ret;
2141*2d5de004SAxel Rasmussen 
2142*2d5de004SAxel Rasmussen 	ret = misc_register(&userfaultfd_misc);
2143*2d5de004SAxel Rasmussen 	if (ret)
2144*2d5de004SAxel Rasmussen 		return ret;
2145*2d5de004SAxel Rasmussen 
21463004ec9cSAndrea Arcangeli 	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
21473004ec9cSAndrea Arcangeli 						sizeof(struct userfaultfd_ctx),
21483004ec9cSAndrea Arcangeli 						0,
21493004ec9cSAndrea Arcangeli 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
21503004ec9cSAndrea Arcangeli 						init_once_userfaultfd_ctx);
21513004ec9cSAndrea Arcangeli 	return 0;
21523004ec9cSAndrea Arcangeli }
21533004ec9cSAndrea Arcangeli __initcall(userfaultfd_init);
2154