// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some parts derived from fs/eventfd.c (anon inode setup) and
 *  mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>

static int sysctl_unprivileged_userfaultfd __read_mostly;

#ifdef CONFIG_SYSCTL
static struct ctl_table vm_userfaultfd_table[] = {
	{
		.procname	= "unprivileged_userfaultfd",
		.data		= &sysctl_unprivileged_userfaultfd,
		.maxlen		= sizeof(sysctl_unprivileged_userfaultfd),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};
#endif
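
/*
 * Usage note (not part of the original file): the sysctl above is exposed
 * as /proc/sys/vm/unprivileged_userfaultfd.  A sketch of enabling
 * unprivileged use from a privileged shell:
 *
 *	# echo 1 > /proc/sys/vm/unprivileged_userfaultfd
 *
 * While it is 0 (the default here), unprivileged callers are expected to
 * pass UFFD_USER_MODE_ONLY to the userfaultfd() syscall, restricting them
 * to user-mode faults.
 */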

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wake up poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED		(1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
	return ctx->features & UFFD_FEATURE_INITIALIZED;
}
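
/*
 * Usage note (not part of the original file): UFFD_FEATURE_INITIALIZED is
 * set once userspace completes the UFFDIO_API handshake.  A minimal sketch
 * of that handshake, assuming uffd came from the userfaultfd() syscall:
 *
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	// api.features now reports what the kernel supports
 *
 * Until this succeeds, poll() on the fd reports EPOLLERR (see
 * userfaultfd_poll() further down).
 */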

/*
 * Whether WP_UNPOPULATED is enabled on the uffd context.  It is only
 * meaningful when userfaultfd_wp()==true on the vma and when it's
 * anonymous.
 */
bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return false;

	return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}

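/*
 * Usage note (not part of the original file): WP_UNPOPULATED is opted into
 * at handshake time, e.g. (userspace sketch):
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = UFFD_FEATURE_WP_UNPOPULATED,
 *	};
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 * With it set, write protection can be kept over never-populated anonymous
 * ranges (via PTE markers) instead of being lost on pages that were never
 * faulted in.
 */
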
static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned long real_address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;

	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
				    real_address : address;

	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}
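
/*
 * Usage note (not part of the original file): the message built above is
 * what a monitor sees from read(2) on the uffd.  A sketch of decoding it:
 *
 *	struct uffd_msg msg;
 *
 *	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *		err(1, "read");
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		unsigned long long addr = msg.arg.pagefault.address;
 *		int wp = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP;
 *		int wr = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE;
 *		// neither WP nor MINOR set means a MISSING fault at addr
 *	}
 */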

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_area_struct *vma,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	pte_t *ptep, pte;
	bool ret = true;

	mmap_assert_locked(ctx->mm);

	ptep = hugetlb_walk(vma, address, vma_mmu_pagesize(vma));
	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_area_struct *vma,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (pte_none_mostly(*pte))
		ret = true;
	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_lock and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_lock so we can only check that
	 * the mmap_lock is held, if PF_DUMPCORE was not set.
	 */
	mmap_assert_locked(mm);

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
		goto out;

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a
		 * non-cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking triggering an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the
		 * non-cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
				reason, ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	/*
	 * Take the vma lock now, in order to safely call
	 * userfaultfd_huge_must_wait() later. Since acquiring the
	 * (sleepable) vma lock can modify the current task state, that
	 * must be before explicitly calling set_current_state().
	 */
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_lock_read(vma);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock from happening before the list_add
	 * in __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vma,
						       vmf->address,
						       vmf->flags, reason);
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_unlock_read(vma);
	mmap_read_unlock(mm);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let either of the two pointers point to
	 * self. So list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function, and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need for list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}
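
/*
 * Usage note (not part of the original file): the schedule() above sleeps
 * until the monitor resolves the fault and wakes this queue.  A hedged
 * sketch of the resolving side for a MISSING fault at 'addr', where 'page'
 * points at a page-sized, page-aligned buffer the monitor filled:
 *
 *	struct uffdio_copy copy = {
 *		.dst = addr & ~(page_size - 1),
 *		.src = (unsigned long)page,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		err(1, "UFFDIO_COPY");
 *
 * UFFDIO_COPY both installs the page and wakes the faulting thread, unless
 * UFFDIO_COPY_MODE_DONTWAKE is passed, in which case an explicit
 * UFFDIO_WAKE is needed.
 */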

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;
		VMA_ITERATOR(vmi, mm, 0);

		/* the various vma->vm_userfaultfd_ctx still points to it */
		mmap_write_lock(mm);
		for_each_vma(vmi, vma) {
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				userfaultfd_set_vm_flags(vma,
							 vma->vm_flags & ~__VM_UFFD_FLAGS);
			}
		}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	atomic_dec(&ctx->mmap_changing);
	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->features = octx->features;
		ctx->released = false;
		atomic_set(&ctx->mmap_changing, 0);
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		atomic_inc(&octx->mmap_changing);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}
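
/*
 * Usage note (not part of the original file): when UFFD_FEATURE_EVENT_FORK
 * is enabled, the monitor receives a UFFD_EVENT_FORK message whose
 * msg.arg.fork.ufd is a new userfaultfd referring to the child's mm.  A
 * hedged userspace sketch:
 *
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = msg.arg.fork.ufd;
 *		// track child_uffd alongside the parent's uffd, and
 *		// close(child_uffd) once the child is no longer monitored
 *	}
 */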

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}
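
/*
 * Usage note (not part of the original file): with UFFD_FEATURE_EVENT_REMAP
 * the monitor learns that a registered range moved, e.g. via mremap(2), and
 * can relocate its own bookkeeping (sketch):
 *
 *	if (msg.event == UFFD_EVENT_REMAP) {
 *		// range [from, from + len) now lives at [to, to + len)
 *		update_tracked_range(msg.arg.remap.from,
 *				     msg.arg.remap.to,
 *				     msg.arg.remap.len);
 *	}
 *
 * update_tracked_range() is a hypothetical monitor-side helper.
 */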

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	atomic_inc(&ctx->mmap_changing);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}
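
/*
 * Usage note (not part of the original file): UFFD_EVENT_REMOVE is raised
 * when pages are removed from a registered range (e.g. via
 * madvise(MADV_REMOVE) or MADV_DONTNEED).  The monitor reads
 * msg.arg.remove.start/.end and must assume the contents of that range
 * are gone.
 */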

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
			   unsigned long end, struct list_head *unmaps)
{
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma;

	for_each_vma_range(vmi, vma, end) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}
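
/*
 * Usage note (not part of the original file): UFFD_EVENT_UNMAP reuses the
 * 'remove' argument layout, so the monitor-side handling mirrors REMOVE:
 *
 *	if (msg.event == UFFD_EVENT_UNMAP) {
 *		// the VMA covering [start, end) was unmapped; stop
 *		// tracking it and expect no further faults from it
 *		forget_range(msg.arg.remove.start, msg.arg.remove.end);
 *	}
 *
 * forget_range() is a hypothetical monitor-side helper.
 */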

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;
	VMA_ITERATOR(vmi, mm, 0);

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
		if (prev) {
			vma = prev;
		} else {
			prev = vma;
		}

		userfaultfd_set_vm_flags(vma, new_flags);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}
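
/*
 * Usage note (not part of the original file): a monitor still polling the
 * pseudo fd when it is released observes EPOLLHUP from the wake_up_poll()
 * above, and should tear down its event loop for that fd.
 */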

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	if (!userfaultfd_is_initialized(ctx))
		return EPOLLERR;

	/*
	 * poll() never guarantees that read won't block.
	 * Userfaults can be woken before they're read().
	 */
	if (unlikely(!(file->f_flags & O_NONBLOCK)))
		return EPOLLERR;
	/*
	 * Lockless access to see if there are pending faults.
	 * __pollwait's last action is the add_wait_queue, but
	 * the spin_unlock would allow the waitqueue_active to
	 * pass above the actual list_add inside the
	 * add_wait_queue critical section. So use a full
	 * memory barrier to serialize the list_add write of
	 * add_wait_queue() with the waitqueue_active read
	 * below.
	 */
	ret = 0;
	smp_mb();
	if (waitqueue_active(&ctx->fault_pending_wqh))
		ret = EPOLLIN;
	else if (waitqueue_active(&ctx->event_wqh))
		ret = EPOLLIN;

	return ret;
}

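/*
 * Usage note (not part of the original file): a typical monitor event loop
 * built on the poll semantics above (O_NONBLOCK is mandatory, see the
 * EPOLLERR check):
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) == -1)
 *			err(1, "poll");
 *		if (pfd.revents & POLLIN) {
 *			struct uffd_msg msg;
 *			while (read(uffd, &msg, sizeof(msg)) > 0)
 *				handle_msg(&msg);	// hypothetical helper
 *		}
 *	}
 */
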
1044893e26e6SPavel Emelyanov static const struct file_operations userfaultfd_fops;
1045893e26e6SPavel Emelyanov 
1046b537900fSDaniel Colascione static int resolve_userfault_fork(struct userfaultfd_ctx *new,
1047b537900fSDaniel Colascione 				  struct inode *inode,
1048893e26e6SPavel Emelyanov 				  struct uffd_msg *msg)
1049893e26e6SPavel Emelyanov {
1050893e26e6SPavel Emelyanov 	int fd;
1051893e26e6SPavel Emelyanov 
1052b537900fSDaniel Colascione 	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
1053abec3d01SOndrej Mosnacek 			O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
1054893e26e6SPavel Emelyanov 	if (fd < 0)
1055893e26e6SPavel Emelyanov 		return fd;
1056893e26e6SPavel Emelyanov 
1057893e26e6SPavel Emelyanov 	msg->arg.reserved.reserved1 = 0;
1058893e26e6SPavel Emelyanov 	msg->arg.fork.ufd = fd;
1059893e26e6SPavel Emelyanov 	return 0;
1060893e26e6SPavel Emelyanov }
1061893e26e6SPavel Emelyanov 
106286039bd3SAndrea Arcangeli static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1063b537900fSDaniel Colascione 				    struct uffd_msg *msg, struct inode *inode)
106486039bd3SAndrea Arcangeli {
106586039bd3SAndrea Arcangeli 	ssize_t ret;
106686039bd3SAndrea Arcangeli 	DECLARE_WAITQUEUE(wait, current);
106715b726efSAndrea Arcangeli 	struct userfaultfd_wait_queue *uwq;
1068893e26e6SPavel Emelyanov 	/*
1069893e26e6SPavel Emelyanov 	 * Handling a fork event requires sleeping operations, so
1070893e26e6SPavel Emelyanov 	 * we drop the event_wqh lock, then do these ops, then
1071893e26e6SPavel Emelyanov 	 * lock it back and wake up the waiter. While the lock is
1072893e26e6SPavel Emelyanov 	 * dropped the ewq may go away so we keep track of it
1073893e26e6SPavel Emelyanov 	 * carefully.
1074893e26e6SPavel Emelyanov 	 */
1075893e26e6SPavel Emelyanov 	LIST_HEAD(fork_event);
1076893e26e6SPavel Emelyanov 	struct userfaultfd_ctx *fork_nctx = NULL;
107786039bd3SAndrea Arcangeli 
107815b726efSAndrea Arcangeli 	/* always take the fd_wqh lock before the fault_pending_wqh lock */
1079ae62c16eSChristoph Hellwig 	spin_lock_irq(&ctx->fd_wqh.lock);
108086039bd3SAndrea Arcangeli 	__add_wait_queue(&ctx->fd_wqh, &wait);
108186039bd3SAndrea Arcangeli 	for (;;) {
108286039bd3SAndrea Arcangeli 		set_current_state(TASK_INTERRUPTIBLE);
108315b726efSAndrea Arcangeli 		spin_lock(&ctx->fault_pending_wqh.lock);
108415b726efSAndrea Arcangeli 		uwq = find_userfault(ctx);
108515b726efSAndrea Arcangeli 		if (uwq) {
108686039bd3SAndrea Arcangeli 			/*
10872c5b7e1bSAndrea Arcangeli 			 * Use a seqcount to repeat the lockless check
10882c5b7e1bSAndrea Arcangeli 			 * in wake_userfault() to avoid missing
10892c5b7e1bSAndrea Arcangeli 			 * wakeups because during the refile both
10902c5b7e1bSAndrea Arcangeli 			 * waitqueues could become empty if this is the
10912c5b7e1bSAndrea Arcangeli 			 * only userfault.
10922c5b7e1bSAndrea Arcangeli 			 */
10932c5b7e1bSAndrea Arcangeli 			write_seqcount_begin(&ctx->refile_seq);
10942c5b7e1bSAndrea Arcangeli 
10952c5b7e1bSAndrea Arcangeli 			/*
109615b726efSAndrea Arcangeli 			 * The fault_pending_wqh.lock prevents the uwq
109715b726efSAndrea Arcangeli 			 * from disappearing from under us.
109815b726efSAndrea Arcangeli 			 *
109915b726efSAndrea Arcangeli 			 * Refile this userfault from
110015b726efSAndrea Arcangeli 			 * fault_pending_wqh to fault_wqh, it's not
110115b726efSAndrea Arcangeli 			 * pending anymore after we read it.
110215b726efSAndrea Arcangeli 			 *
110315b726efSAndrea Arcangeli 			 * Use list_del() by hand (as
110415b726efSAndrea Arcangeli 			 * userfaultfd_wake_function also uses
110515b726efSAndrea Arcangeli 			 * list_del_init() by hand) to be sure nobody
110615b726efSAndrea Arcangeli 			 * changes __remove_wait_queue() to use
110715b726efSAndrea Arcangeli 			 * list_del_init() in turn breaking the
110815b726efSAndrea Arcangeli 			 * !list_empty_careful() check in
11092055da97SIngo Molnar 			 * handle_userfault(). The uwq->wq.head list
111015b726efSAndrea Arcangeli 			 * must never be empty at any time during the
111115b726efSAndrea Arcangeli 			 * refile, or the waitqueue could disappear
111215b726efSAndrea Arcangeli 			 * from under us. The "wait_queue_head_t"
111315b726efSAndrea Arcangeli 			 * parameter of __remove_wait_queue() is unused
111415b726efSAndrea Arcangeli 			 * anyway.
111586039bd3SAndrea Arcangeli 			 */
11162055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1117c430d1e8SMatthew Wilcox 			add_wait_queue(&ctx->fault_wqh, &uwq->wq);
111815b726efSAndrea Arcangeli 
11192c5b7e1bSAndrea Arcangeli 			write_seqcount_end(&ctx->refile_seq);
11202c5b7e1bSAndrea Arcangeli 
1121a9b85f94SAndrea Arcangeli 			/* careful to always initialize msg if ret == 0 */
1122a9b85f94SAndrea Arcangeli 			*msg = uwq->msg;
112315b726efSAndrea Arcangeli 			spin_unlock(&ctx->fault_pending_wqh.lock);
112486039bd3SAndrea Arcangeli 			ret = 0;
112586039bd3SAndrea Arcangeli 			break;
112686039bd3SAndrea Arcangeli 		}
112715b726efSAndrea Arcangeli 		spin_unlock(&ctx->fault_pending_wqh.lock);
11289cd75c3cSPavel Emelyanov 
11299cd75c3cSPavel Emelyanov 		spin_lock(&ctx->event_wqh.lock);
11309cd75c3cSPavel Emelyanov 		uwq = find_userfault_evt(ctx);
11319cd75c3cSPavel Emelyanov 		if (uwq) {
11329cd75c3cSPavel Emelyanov 			*msg = uwq->msg;
11339cd75c3cSPavel Emelyanov 
1134893e26e6SPavel Emelyanov 			if (uwq->msg.event == UFFD_EVENT_FORK) {
1135893e26e6SPavel Emelyanov 				fork_nctx = (struct userfaultfd_ctx *)
1136893e26e6SPavel Emelyanov 					(unsigned long)
1137893e26e6SPavel Emelyanov 					uwq->msg.arg.reserved.reserved1;
11382055da97SIngo Molnar 				list_move(&uwq->wq.entry, &fork_event);
1139384632e6SAndrea Arcangeli 				/*
1140384632e6SAndrea Arcangeli 				 * fork_nctx can be freed as soon as
1141384632e6SAndrea Arcangeli 				 * we drop the lock, unless we take a
1142384632e6SAndrea Arcangeli 				 * reference on it.
1143384632e6SAndrea Arcangeli 				 */
1144384632e6SAndrea Arcangeli 				userfaultfd_ctx_get(fork_nctx);
1145893e26e6SPavel Emelyanov 				spin_unlock(&ctx->event_wqh.lock);
1146893e26e6SPavel Emelyanov 				ret = 0;
1147893e26e6SPavel Emelyanov 				break;
1148893e26e6SPavel Emelyanov 			}
1149893e26e6SPavel Emelyanov 
11509cd75c3cSPavel Emelyanov 			userfaultfd_event_complete(ctx, uwq);
11519cd75c3cSPavel Emelyanov 			spin_unlock(&ctx->event_wqh.lock);
11529cd75c3cSPavel Emelyanov 			ret = 0;
11539cd75c3cSPavel Emelyanov 			break;
11549cd75c3cSPavel Emelyanov 		}
11559cd75c3cSPavel Emelyanov 		spin_unlock(&ctx->event_wqh.lock);
11569cd75c3cSPavel Emelyanov 
115786039bd3SAndrea Arcangeli 		if (signal_pending(current)) {
115886039bd3SAndrea Arcangeli 			ret = -ERESTARTSYS;
115986039bd3SAndrea Arcangeli 			break;
116086039bd3SAndrea Arcangeli 		}
116186039bd3SAndrea Arcangeli 		if (no_wait) {
116286039bd3SAndrea Arcangeli 			ret = -EAGAIN;
116386039bd3SAndrea Arcangeli 			break;
116486039bd3SAndrea Arcangeli 		}
1165ae62c16eSChristoph Hellwig 		spin_unlock_irq(&ctx->fd_wqh.lock);
116686039bd3SAndrea Arcangeli 		schedule();
1167ae62c16eSChristoph Hellwig 		spin_lock_irq(&ctx->fd_wqh.lock);
116886039bd3SAndrea Arcangeli 	}
116986039bd3SAndrea Arcangeli 	__remove_wait_queue(&ctx->fd_wqh, &wait);
117086039bd3SAndrea Arcangeli 	__set_current_state(TASK_RUNNING);
1171ae62c16eSChristoph Hellwig 	spin_unlock_irq(&ctx->fd_wqh.lock);
117286039bd3SAndrea Arcangeli 
1173893e26e6SPavel Emelyanov 	if (!ret && msg->event == UFFD_EVENT_FORK) {
1174b537900fSDaniel Colascione 		ret = resolve_userfault_fork(fork_nctx, inode, msg);
1175cbcfa130SEric Biggers 		spin_lock_irq(&ctx->event_wqh.lock);
1176893e26e6SPavel Emelyanov 		if (!list_empty(&fork_event)) {
1177384632e6SAndrea Arcangeli 			/*
1178384632e6SAndrea Arcangeli 			 * The fork thread didn't abort, so we can
1179384632e6SAndrea Arcangeli 			 * drop the temporary refcount.
1180384632e6SAndrea Arcangeli 			 */
1181384632e6SAndrea Arcangeli 			userfaultfd_ctx_put(fork_nctx);
1182384632e6SAndrea Arcangeli 
1183893e26e6SPavel Emelyanov 			uwq = list_first_entry(&fork_event,
1184893e26e6SPavel Emelyanov 					       typeof(*uwq),
11852055da97SIngo Molnar 					       wq.entry);
1186384632e6SAndrea Arcangeli 			/*
1187384632e6SAndrea Arcangeli 			 * If fork_event list wasn't empty and in turn
1188384632e6SAndrea Arcangeli 			 * the event wasn't already released by fork
1189384632e6SAndrea Arcangeli 			 * (the event is allocated on the fork kernel
1190384632e6SAndrea Arcangeli 			 * stack), put the event back in its place in
1191384632e6SAndrea Arcangeli 			 * the event_wqh. The fork_event head will be freed
1192384632e6SAndrea Arcangeli 			 * as soon as we return so the event cannot
1193384632e6SAndrea Arcangeli 			 * stay queued there no matter the current
1194384632e6SAndrea Arcangeli 			 * "ret" value.
1195384632e6SAndrea Arcangeli 			 */
11962055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1197893e26e6SPavel Emelyanov 			__add_wait_queue(&ctx->event_wqh, &uwq->wq);
1198384632e6SAndrea Arcangeli 
1199384632e6SAndrea Arcangeli 			/*
1200384632e6SAndrea Arcangeli 			 * Leave the event in the waitqueue and report
1201384632e6SAndrea Arcangeli 			 * error to userland if we failed to resolve
1202384632e6SAndrea Arcangeli 			 * the userfault fork.
1203384632e6SAndrea Arcangeli 			 */
1204384632e6SAndrea Arcangeli 			if (likely(!ret))
1205893e26e6SPavel Emelyanov 				userfaultfd_event_complete(ctx, uwq);
1206384632e6SAndrea Arcangeli 		} else {
1207384632e6SAndrea Arcangeli 			/*
1208384632e6SAndrea Arcangeli 			 * Here the fork thread aborted and the
1209384632e6SAndrea Arcangeli 			 * refcount from the fork thread on fork_nctx
1210384632e6SAndrea Arcangeli 			 * has already been released. We still hold
1211384632e6SAndrea Arcangeli 			 * the reference we took before releasing the
1212384632e6SAndrea Arcangeli 			 * lock above. If resolve_userfault_fork
1213384632e6SAndrea Arcangeli 			 * failed we have to drop it because the
1214384632e6SAndrea Arcangeli 			 * fork_nctx has to be freed in that case. If
1215384632e6SAndrea Arcangeli 			 * it succeeded we'll hold it because the new
1216384632e6SAndrea Arcangeli 			 * uffd references it.
1217384632e6SAndrea Arcangeli 			 */
1218384632e6SAndrea Arcangeli 			if (ret)
1219384632e6SAndrea Arcangeli 				userfaultfd_ctx_put(fork_nctx);
1220893e26e6SPavel Emelyanov 		}
1221cbcfa130SEric Biggers 		spin_unlock_irq(&ctx->event_wqh.lock);
1222893e26e6SPavel Emelyanov 	}
1223893e26e6SPavel Emelyanov 
122486039bd3SAndrea Arcangeli 	return ret;
122586039bd3SAndrea Arcangeli }
122686039bd3SAndrea Arcangeli 
122786039bd3SAndrea Arcangeli static ssize_t userfaultfd_read(struct file *file, char __user *buf,
122886039bd3SAndrea Arcangeli 				size_t count, loff_t *ppos)
122986039bd3SAndrea Arcangeli {
123086039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
123186039bd3SAndrea Arcangeli 	ssize_t _ret, ret = 0;
1232a9b85f94SAndrea Arcangeli 	struct uffd_msg msg;
123386039bd3SAndrea Arcangeli 	int no_wait = file->f_flags & O_NONBLOCK;
1234b537900fSDaniel Colascione 	struct inode *inode = file_inode(file);
123586039bd3SAndrea Arcangeli 
123622e5fe2aSNadav Amit 	if (!userfaultfd_is_initialized(ctx))
123786039bd3SAndrea Arcangeli 		return -EINVAL;
123886039bd3SAndrea Arcangeli 
123986039bd3SAndrea Arcangeli 	for (;;) {
1240a9b85f94SAndrea Arcangeli 		if (count < sizeof(msg))
124186039bd3SAndrea Arcangeli 			return ret ? ret : -EINVAL;
1242b537900fSDaniel Colascione 		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
124386039bd3SAndrea Arcangeli 		if (_ret < 0)
124486039bd3SAndrea Arcangeli 			return ret ? ret : _ret;
1245a9b85f94SAndrea Arcangeli 		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
124686039bd3SAndrea Arcangeli 			return ret ? ret : -EFAULT;
1247a9b85f94SAndrea Arcangeli 		ret += sizeof(msg);
1248a9b85f94SAndrea Arcangeli 		buf += sizeof(msg);
1249a9b85f94SAndrea Arcangeli 		count -= sizeof(msg);
125086039bd3SAndrea Arcangeli 		/*
125186039bd3SAndrea Arcangeli 		 * Allow reading more than one fault at a time, but only
125286039bd3SAndrea Arcangeli 		 * block while waiting for the very first one.
125386039bd3SAndrea Arcangeli 		 */
125486039bd3SAndrea Arcangeli 		no_wait = O_NONBLOCK;
125586039bd3SAndrea Arcangeli 	}
125686039bd3SAndrea Arcangeli }
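
/*
 * Illustrative userspace sketch (not part of this file): draining
 * queued messages with one read(). As implemented above, read()
 * returns whole uffd_msg records, blocks at most for the first one
 * (never with O_NONBLOCK) and fails with EINVAL if the buffer is
 * smaller than a single record. handle_fault() is a hypothetical
 * helper.
 *
 *	struct uffd_msg msg[16];
 *	ssize_t n, i;
 *
 *	n = read(uffd, msg, sizeof(msg));
 *	if (n == -1)
 *		err(1, "read");
 *	for (i = 0; i < n / (ssize_t)sizeof(msg[0]); i++)
 *		if (msg[i].event == UFFD_EVENT_PAGEFAULT)
 *			handle_fault(uffd, msg[i].arg.pagefault.address);
 */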
125786039bd3SAndrea Arcangeli 
125886039bd3SAndrea Arcangeli static void __wake_userfault(struct userfaultfd_ctx *ctx,
125986039bd3SAndrea Arcangeli 			     struct userfaultfd_wake_range *range)
126086039bd3SAndrea Arcangeli {
1261cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
126286039bd3SAndrea Arcangeli 	/* wake all in the range and autoremove */
126315b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_pending_wqh))
1264ac5be6b4SAndrea Arcangeli 		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
126515b726efSAndrea Arcangeli 				     range);
126615b726efSAndrea Arcangeli 	if (waitqueue_active(&ctx->fault_wqh))
1267c430d1e8SMatthew Wilcox 		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1268cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
126986039bd3SAndrea Arcangeli }
127086039bd3SAndrea Arcangeli 
127186039bd3SAndrea Arcangeli static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
127286039bd3SAndrea Arcangeli 					   struct userfaultfd_wake_range *range)
127386039bd3SAndrea Arcangeli {
12742c5b7e1bSAndrea Arcangeli 	unsigned seq;
12752c5b7e1bSAndrea Arcangeli 	bool need_wakeup;
12762c5b7e1bSAndrea Arcangeli 
127786039bd3SAndrea Arcangeli 	/*
127886039bd3SAndrea Arcangeli 	 * To be sure waitqueue_active() is not reordered by the CPU
127986039bd3SAndrea Arcangeli 	 * before the pagetable update, use an explicit SMP memory
12803e4e28c5SMichel Lespinasse 	 * barrier here. PT lock release or mmap_read_unlock(mm) still
128186039bd3SAndrea Arcangeli 	 * have release semantics that can allow the
128286039bd3SAndrea Arcangeli 	 * waitqueue_active() to be reordered before the pte update.
128386039bd3SAndrea Arcangeli 	 */
128486039bd3SAndrea Arcangeli 	smp_mb();
128586039bd3SAndrea Arcangeli 
128686039bd3SAndrea Arcangeli 	/*
128786039bd3SAndrea Arcangeli 	 * Use waitqueue_active because it's very frequent to
128886039bd3SAndrea Arcangeli 	 * change the address space atomically even if there are no
128986039bd3SAndrea Arcangeli 	 * userfaults yet. So we take the spinlock only when we're
129086039bd3SAndrea Arcangeli 	 * sure we have userfaults to wake.
129186039bd3SAndrea Arcangeli 	 */
12922c5b7e1bSAndrea Arcangeli 	do {
12932c5b7e1bSAndrea Arcangeli 		seq = read_seqcount_begin(&ctx->refile_seq);
12942c5b7e1bSAndrea Arcangeli 		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
12952c5b7e1bSAndrea Arcangeli 			waitqueue_active(&ctx->fault_wqh);
12962c5b7e1bSAndrea Arcangeli 		cond_resched();
12972c5b7e1bSAndrea Arcangeli 	} while (read_seqcount_retry(&ctx->refile_seq, seq));
12982c5b7e1bSAndrea Arcangeli 	if (need_wakeup)
129986039bd3SAndrea Arcangeli 		__wake_userfault(ctx, range);
130086039bd3SAndrea Arcangeli }
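
/*
 * Schematic of the refile_seq pairing (a summary of code already in
 * this file, not additional logic):
 *
 *	wake_userfault() (reader)	userfaultfd_ctx_read() (writer)
 *	-------------------------	-------------------------------
 *	seq = read_seqcount_begin()	write_seqcount_begin()
 *	check fault_pending_wqh		list_del() from fault_pending_wqh
 *	check fault_wqh			add_wait_queue() to fault_wqh
 *	read_seqcount_retry(seq)?	write_seqcount_end()
 *
 * If the two sides race, the reader retries, so the waitqueue_active()
 * checks cannot miss a userfault that is transiently on neither list
 * in the middle of a refile.
 */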
130186039bd3SAndrea Arcangeli 
130286039bd3SAndrea Arcangeli static __always_inline int validate_range(struct mm_struct *mm,
1303e71e2aceSPeter Collingbourne 					  __u64 start, __u64 len)
130486039bd3SAndrea Arcangeli {
130586039bd3SAndrea Arcangeli 	__u64 task_size = mm->task_size;
130686039bd3SAndrea Arcangeli 
1307e71e2aceSPeter Collingbourne 	if (start & ~PAGE_MASK)
130886039bd3SAndrea Arcangeli 		return -EINVAL;
130986039bd3SAndrea Arcangeli 	if (len & ~PAGE_MASK)
131086039bd3SAndrea Arcangeli 		return -EINVAL;
131186039bd3SAndrea Arcangeli 	if (!len)
131286039bd3SAndrea Arcangeli 		return -EINVAL;
1313e71e2aceSPeter Collingbourne 	if (start < mmap_min_addr)
131486039bd3SAndrea Arcangeli 		return -EINVAL;
1315e71e2aceSPeter Collingbourne 	if (start >= task_size)
131686039bd3SAndrea Arcangeli 		return -EINVAL;
1317e71e2aceSPeter Collingbourne 	if (len > task_size - start)
131886039bd3SAndrea Arcangeli 		return -EINVAL;
131986039bd3SAndrea Arcangeli 	return 0;
132086039bd3SAndrea Arcangeli }
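
/*
 * Examples of the checks above (illustrative values only; assumes a
 * 4KiB page size and that mmap_min_addr is below 0x100000):
 *
 *	validate_range(mm, 0x100000, 0x2000);	   // 0: aligned, in range
 *	validate_range(mm, 0x100001, 0x1000);	   // -EINVAL: unaligned start
 *	validate_range(mm, 0x100000, 0);	   // -EINVAL: empty range
 *	validate_range(mm, mm->task_size, 0x1000); // -EINVAL: start too high
 */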
132186039bd3SAndrea Arcangeli 
132286039bd3SAndrea Arcangeli static int userfaultfd_register(struct userfaultfd_ctx *ctx,
132386039bd3SAndrea Arcangeli 				unsigned long arg)
132486039bd3SAndrea Arcangeli {
132586039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
132686039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev, *cur;
132786039bd3SAndrea Arcangeli 	int ret;
132886039bd3SAndrea Arcangeli 	struct uffdio_register uffdio_register;
132986039bd3SAndrea Arcangeli 	struct uffdio_register __user *user_uffdio_register;
133086039bd3SAndrea Arcangeli 	unsigned long vm_flags, new_flags;
133186039bd3SAndrea Arcangeli 	bool found;
1332ce53e8e6SMike Rapoport 	bool basic_ioctls;
133386039bd3SAndrea Arcangeli 	unsigned long start, end, vma_end;
133411a9b902SLiam R. Howlett 	struct vma_iterator vmi;
133586039bd3SAndrea Arcangeli 
133686039bd3SAndrea Arcangeli 	user_uffdio_register = (struct uffdio_register __user *) arg;
133786039bd3SAndrea Arcangeli 
133886039bd3SAndrea Arcangeli 	ret = -EFAULT;
133986039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_register, user_uffdio_register,
134086039bd3SAndrea Arcangeli 			   sizeof(uffdio_register)-sizeof(__u64)))
134186039bd3SAndrea Arcangeli 		goto out;
134286039bd3SAndrea Arcangeli 
134386039bd3SAndrea Arcangeli 	ret = -EINVAL;
134486039bd3SAndrea Arcangeli 	if (!uffdio_register.mode)
134586039bd3SAndrea Arcangeli 		goto out;
13467677f7fdSAxel Rasmussen 	if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
134786039bd3SAndrea Arcangeli 		goto out;
134886039bd3SAndrea Arcangeli 	vm_flags = 0;
134986039bd3SAndrea Arcangeli 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
135086039bd3SAndrea Arcangeli 		vm_flags |= VM_UFFD_MISSING;
135100b151f2SPeter Xu 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
135200b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
135300b151f2SPeter Xu 		goto out;
135400b151f2SPeter Xu #endif
135586039bd3SAndrea Arcangeli 		vm_flags |= VM_UFFD_WP;
135600b151f2SPeter Xu 	}
13577677f7fdSAxel Rasmussen 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
13587677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
13597677f7fdSAxel Rasmussen 		goto out;
13607677f7fdSAxel Rasmussen #endif
13617677f7fdSAxel Rasmussen 		vm_flags |= VM_UFFD_MINOR;
13627677f7fdSAxel Rasmussen 	}
136386039bd3SAndrea Arcangeli 
1364e71e2aceSPeter Collingbourne 	ret = validate_range(mm, uffdio_register.range.start,
136586039bd3SAndrea Arcangeli 			     uffdio_register.range.len);
136686039bd3SAndrea Arcangeli 	if (ret)
136786039bd3SAndrea Arcangeli 		goto out;
136886039bd3SAndrea Arcangeli 
136986039bd3SAndrea Arcangeli 	start = uffdio_register.range.start;
137086039bd3SAndrea Arcangeli 	end = start + uffdio_register.range.len;
137186039bd3SAndrea Arcangeli 
1372d2005e3fSOleg Nesterov 	ret = -ENOMEM;
1373d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
1374d2005e3fSOleg Nesterov 		goto out;
1375d2005e3fSOleg Nesterov 
137686039bd3SAndrea Arcangeli 	ret = -EINVAL;
137711a9b902SLiam R. Howlett 	mmap_write_lock(mm);
137811a9b902SLiam R. Howlett 	vma_iter_init(&vmi, mm, start);
137911a9b902SLiam R. Howlett 	vma = vma_find(&vmi, end);
138011a9b902SLiam R. Howlett 	if (!vma)
138186039bd3SAndrea Arcangeli 		goto out_unlock;
138286039bd3SAndrea Arcangeli 
138386039bd3SAndrea Arcangeli 	/*
1384cab350afSMike Kravetz 	 * If the first vma contains huge pages, make sure start address
1385cab350afSMike Kravetz 	 * is aligned to huge page size.
1386cab350afSMike Kravetz 	 */
1387cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1388cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1389cab350afSMike Kravetz 
1390cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1391cab350afSMike Kravetz 			goto out_unlock;
1392cab350afSMike Kravetz 	}
1393cab350afSMike Kravetz 
1394cab350afSMike Kravetz 	/*
139586039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
139686039bd3SAndrea Arcangeli 	 */
139786039bd3SAndrea Arcangeli 	found = false;
1398ce53e8e6SMike Rapoport 	basic_ioctls = false;
139911a9b902SLiam R. Howlett 	cur = vma;
140011a9b902SLiam R. Howlett 	do {
140186039bd3SAndrea Arcangeli 		cond_resched();
140286039bd3SAndrea Arcangeli 
140386039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
14047677f7fdSAxel Rasmussen 		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
140586039bd3SAndrea Arcangeli 
140686039bd3SAndrea Arcangeli 		/* check not compatible vmas */
140786039bd3SAndrea Arcangeli 		ret = -EINVAL;
140863b2d417SAndrea Arcangeli 		if (!vma_can_userfault(cur, vm_flags))
140986039bd3SAndrea Arcangeli 			goto out_unlock;
141029ec9066SAndrea Arcangeli 
141129ec9066SAndrea Arcangeli 		/*
141229ec9066SAndrea Arcangeli 		 * UFFDIO_COPY will fill file holes even without
141329ec9066SAndrea Arcangeli 		 * PROT_WRITE. This check enforces that if this is a
141429ec9066SAndrea Arcangeli 		 * MAP_SHARED mapping, the process has write permission to the
141529ec9066SAndrea Arcangeli 		 * backing file. If VM_MAYWRITE is set it also enforces that on
141629ec9066SAndrea Arcangeli 		 * a MAP_SHARED vma: there is no F_SEAL_WRITE and no further
141729ec9066SAndrea Arcangeli 		 * F_SEAL_WRITE can be taken until the vma is destroyed.
141829ec9066SAndrea Arcangeli 		 */
141929ec9066SAndrea Arcangeli 		ret = -EPERM;
142029ec9066SAndrea Arcangeli 		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
142129ec9066SAndrea Arcangeli 			goto out_unlock;
142229ec9066SAndrea Arcangeli 
1423cab350afSMike Kravetz 		/*
1424cab350afSMike Kravetz 		 * If this vma contains the ending address and holds
1425cab350afSMike Kravetz 		 * huge pages, check the alignment of the end address.
1426cab350afSMike Kravetz 		 */
1427cab350afSMike Kravetz 		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1428cab350afSMike Kravetz 		    end > cur->vm_start) {
1429cab350afSMike Kravetz 			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1430cab350afSMike Kravetz 
1431cab350afSMike Kravetz 			ret = -EINVAL;
1432cab350afSMike Kravetz 
1433cab350afSMike Kravetz 			if (end & (vma_hpagesize - 1))
1434cab350afSMike Kravetz 				goto out_unlock;
1435cab350afSMike Kravetz 		}
143663b2d417SAndrea Arcangeli 		if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
143763b2d417SAndrea Arcangeli 			goto out_unlock;
143886039bd3SAndrea Arcangeli 
143986039bd3SAndrea Arcangeli 		/*
144086039bd3SAndrea Arcangeli 		 * Check that this vma isn't already owned by a
144186039bd3SAndrea Arcangeli 		 * different userfaultfd. We can't allow more than one
144286039bd3SAndrea Arcangeli 		 * userfaultfd to own a single vma simultaneously or we
144386039bd3SAndrea Arcangeli 		 * wouldn't know which one to deliver the userfaults to.
144486039bd3SAndrea Arcangeli 		 */
144586039bd3SAndrea Arcangeli 		ret = -EBUSY;
144686039bd3SAndrea Arcangeli 		if (cur->vm_userfaultfd_ctx.ctx &&
144786039bd3SAndrea Arcangeli 		    cur->vm_userfaultfd_ctx.ctx != ctx)
144886039bd3SAndrea Arcangeli 			goto out_unlock;
144986039bd3SAndrea Arcangeli 
1450cab350afSMike Kravetz 		/*
1451cab350afSMike Kravetz 		 * Note vmas containing huge pages
1452cab350afSMike Kravetz 		 */
1453ce53e8e6SMike Rapoport 		if (is_vm_hugetlb_page(cur))
1454ce53e8e6SMike Rapoport 			basic_ioctls = true;
1455cab350afSMike Kravetz 
145686039bd3SAndrea Arcangeli 		found = true;
145711a9b902SLiam R. Howlett 	} for_each_vma_range(vmi, cur, end);
145886039bd3SAndrea Arcangeli 	BUG_ON(!found);
145986039bd3SAndrea Arcangeli 
146011a9b902SLiam R. Howlett 	vma_iter_set(&vmi, start);
146111a9b902SLiam R. Howlett 	prev = vma_prev(&vmi);
1462*270aa010SPeter Xu 	if (vma->vm_start < start)
1463*270aa010SPeter Xu 		prev = vma;
146486039bd3SAndrea Arcangeli 
146586039bd3SAndrea Arcangeli 	ret = 0;
146611a9b902SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
146786039bd3SAndrea Arcangeli 		cond_resched();
146886039bd3SAndrea Arcangeli 
146963b2d417SAndrea Arcangeli 		BUG_ON(!vma_can_userfault(vma, vm_flags));
147086039bd3SAndrea Arcangeli 		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
147186039bd3SAndrea Arcangeli 		       vma->vm_userfaultfd_ctx.ctx != ctx);
147229ec9066SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
147386039bd3SAndrea Arcangeli 
147486039bd3SAndrea Arcangeli 		/*
147586039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is already registered into this
147686039bd3SAndrea Arcangeli 		 * userfaultfd and with the right tracking mode too.
147786039bd3SAndrea Arcangeli 		 */
147886039bd3SAndrea Arcangeli 		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
147986039bd3SAndrea Arcangeli 		    (vma->vm_flags & vm_flags) == vm_flags)
148086039bd3SAndrea Arcangeli 			goto skip;
148186039bd3SAndrea Arcangeli 
148286039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
148386039bd3SAndrea Arcangeli 			start = vma->vm_start;
148486039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
148586039bd3SAndrea Arcangeli 
14867677f7fdSAxel Rasmussen 		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
14879760ebffSLiam R. Howlett 		prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
148886039bd3SAndrea Arcangeli 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
148986039bd3SAndrea Arcangeli 				 vma_policy(vma),
14909a10064fSColin Cross 				 ((struct vm_userfaultfd_ctx){ ctx }),
14915c26f6acSSuren Baghdasaryan 				 anon_vma_name(vma));
149286039bd3SAndrea Arcangeli 		if (prev) {
149369dbe6daSLiam R. Howlett 			/* vma_merge() invalidated the mas */
149486039bd3SAndrea Arcangeli 			vma = prev;
149586039bd3SAndrea Arcangeli 			goto next;
149686039bd3SAndrea Arcangeli 		}
149786039bd3SAndrea Arcangeli 		if (vma->vm_start < start) {
14989760ebffSLiam R. Howlett 			ret = split_vma(&vmi, vma, start, 1);
149986039bd3SAndrea Arcangeli 			if (ret)
150086039bd3SAndrea Arcangeli 				break;
150186039bd3SAndrea Arcangeli 		}
150286039bd3SAndrea Arcangeli 		if (vma->vm_end > end) {
15039760ebffSLiam R. Howlett 			ret = split_vma(&vmi, vma, end, 0);
150486039bd3SAndrea Arcangeli 			if (ret)
150586039bd3SAndrea Arcangeli 				break;
150686039bd3SAndrea Arcangeli 		}
150786039bd3SAndrea Arcangeli 	next:
150886039bd3SAndrea Arcangeli 		/*
150986039bd3SAndrea Arcangeli 		 * In the vma_merge() successful mprotect-like case 8:
151086039bd3SAndrea Arcangeli 		 * the next vma was merged into the current one and
151186039bd3SAndrea Arcangeli 		 * the current one has not been updated yet.
151286039bd3SAndrea Arcangeli 		 */
151351d3d5ebSDavid Hildenbrand 		userfaultfd_set_vm_flags(vma, new_flags);
151486039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx.ctx = ctx;
151586039bd3SAndrea Arcangeli 
15166dfeaff9SPeter Xu 		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
15176dfeaff9SPeter Xu 			hugetlb_unshare_all_pmds(vma);
15186dfeaff9SPeter Xu 
151986039bd3SAndrea Arcangeli 	skip:
152086039bd3SAndrea Arcangeli 		prev = vma;
152186039bd3SAndrea Arcangeli 		start = vma->vm_end;
152211a9b902SLiam R. Howlett 	}
152311a9b902SLiam R. Howlett 
152486039bd3SAndrea Arcangeli out_unlock:
1525d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1526d2005e3fSOleg Nesterov 	mmput(mm);
152786039bd3SAndrea Arcangeli 	if (!ret) {
152814819305SPeter Xu 		__u64 ioctls_out;
152914819305SPeter Xu 
153014819305SPeter Xu 		ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
153114819305SPeter Xu 		    UFFD_API_RANGE_IOCTLS;
153214819305SPeter Xu 
153314819305SPeter Xu 		/*
153414819305SPeter Xu 		 * Declare the WP ioctl only if the WP mode is
153514819305SPeter Xu 		 * specified and all checks passed for the range.
153614819305SPeter Xu 		 */
153714819305SPeter Xu 		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
153814819305SPeter Xu 			ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
153914819305SPeter Xu 
1540f6191471SAxel Rasmussen 		/* CONTINUE ioctl is only supported for MINOR ranges. */
1541f6191471SAxel Rasmussen 		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1542f6191471SAxel Rasmussen 			ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1543f6191471SAxel Rasmussen 
154486039bd3SAndrea Arcangeli 		/*
154586039bd3SAndrea Arcangeli 		 * Now that we scanned all vmas we can already tell
154686039bd3SAndrea Arcangeli 		 * userland which ioctl methods are guaranteed to
154786039bd3SAndrea Arcangeli 		 * succeed on this range.
154886039bd3SAndrea Arcangeli 		 */
154914819305SPeter Xu 		if (put_user(ioctls_out, &user_uffdio_register->ioctls))
155086039bd3SAndrea Arcangeli 			ret = -EFAULT;
155186039bd3SAndrea Arcangeli 	}
155286039bd3SAndrea Arcangeli out:
155386039bd3SAndrea Arcangeli 	return ret;
155486039bd3SAndrea Arcangeli }
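
/*
 * Illustrative userspace sketch (not part of this file; assumes the
 * uapi in <linux/userfaultfd.h>): registering a range for missing-page
 * tracking and consuming the ioctls bitmask written back on success.
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		err(1, "UFFDIO_REGISTER");	// EINVAL/EBUSY/EPERM as above
 *	if (!(reg.ioctls & (1ULL << _UFFDIO_COPY)))
 *		errx(1, "UFFDIO_COPY not guaranteed on this range");
 */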
155586039bd3SAndrea Arcangeli 
155686039bd3SAndrea Arcangeli static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
155786039bd3SAndrea Arcangeli 				  unsigned long arg)
155886039bd3SAndrea Arcangeli {
155986039bd3SAndrea Arcangeli 	struct mm_struct *mm = ctx->mm;
156086039bd3SAndrea Arcangeli 	struct vm_area_struct *vma, *prev, *cur;
156186039bd3SAndrea Arcangeli 	int ret;
156286039bd3SAndrea Arcangeli 	struct uffdio_range uffdio_unregister;
156386039bd3SAndrea Arcangeli 	unsigned long new_flags;
156486039bd3SAndrea Arcangeli 	bool found;
156586039bd3SAndrea Arcangeli 	unsigned long start, end, vma_end;
156686039bd3SAndrea Arcangeli 	const void __user *buf = (void __user *)arg;
156711a9b902SLiam R. Howlett 	struct vma_iterator vmi;
156886039bd3SAndrea Arcangeli 
156986039bd3SAndrea Arcangeli 	ret = -EFAULT;
157086039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
157186039bd3SAndrea Arcangeli 		goto out;
157286039bd3SAndrea Arcangeli 
1573e71e2aceSPeter Collingbourne 	ret = validate_range(mm, uffdio_unregister.start,
157486039bd3SAndrea Arcangeli 			     uffdio_unregister.len);
157586039bd3SAndrea Arcangeli 	if (ret)
157686039bd3SAndrea Arcangeli 		goto out;
157786039bd3SAndrea Arcangeli 
157886039bd3SAndrea Arcangeli 	start = uffdio_unregister.start;
157986039bd3SAndrea Arcangeli 	end = start + uffdio_unregister.len;
158086039bd3SAndrea Arcangeli 
1581d2005e3fSOleg Nesterov 	ret = -ENOMEM;
1582d2005e3fSOleg Nesterov 	if (!mmget_not_zero(mm))
1583d2005e3fSOleg Nesterov 		goto out;
1584d2005e3fSOleg Nesterov 
1585d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
158686039bd3SAndrea Arcangeli 	ret = -EINVAL;
158711a9b902SLiam R. Howlett 	vma_iter_init(&vmi, mm, start);
158811a9b902SLiam R. Howlett 	vma = vma_find(&vmi, end);
158911a9b902SLiam R. Howlett 	if (!vma)
159086039bd3SAndrea Arcangeli 		goto out_unlock;
159186039bd3SAndrea Arcangeli 
159286039bd3SAndrea Arcangeli 	/*
1593cab350afSMike Kravetz 	 * If the first vma contains huge pages, make sure start address
1594cab350afSMike Kravetz 	 * is aligned to huge page size.
1595cab350afSMike Kravetz 	 */
1596cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1597cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1598cab350afSMike Kravetz 
1599cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1600cab350afSMike Kravetz 			goto out_unlock;
1601cab350afSMike Kravetz 	}
1602cab350afSMike Kravetz 
1603cab350afSMike Kravetz 	/*
160486039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
160586039bd3SAndrea Arcangeli 	 */
160686039bd3SAndrea Arcangeli 	found = false;
160711a9b902SLiam R. Howlett 	cur = vma;
160811a9b902SLiam R. Howlett 	do {
160986039bd3SAndrea Arcangeli 		cond_resched();
161086039bd3SAndrea Arcangeli 
161186039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
16127677f7fdSAxel Rasmussen 		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
161386039bd3SAndrea Arcangeli 
161486039bd3SAndrea Arcangeli 		/*
161586039bd3SAndrea Arcangeli 		 * Check for incompatible vmas. This is not strictly
161686039bd3SAndrea Arcangeli 		 * required here, as incompatible vmas cannot have a
161786039bd3SAndrea Arcangeli 		 * userfaultfd_ctx registered on them, but it
161886039bd3SAndrea Arcangeli 		 * provides stricter behavior so that unregistration
161986039bd3SAndrea Arcangeli 		 * errors are noticed.
162086039bd3SAndrea Arcangeli 		 */
162163b2d417SAndrea Arcangeli 		if (!vma_can_userfault(cur, cur->vm_flags))
162286039bd3SAndrea Arcangeli 			goto out_unlock;
162386039bd3SAndrea Arcangeli 
162486039bd3SAndrea Arcangeli 		found = true;
162511a9b902SLiam R. Howlett 	} for_each_vma_range(vmi, cur, end);
162686039bd3SAndrea Arcangeli 	BUG_ON(!found);
162786039bd3SAndrea Arcangeli 
162811a9b902SLiam R. Howlett 	vma_iter_set(&vmi, start);
162911a9b902SLiam R. Howlett 	prev = vma_prev(&vmi);
1630*270aa010SPeter Xu 	if (vma->vm_start < start)
1631*270aa010SPeter Xu 		prev = vma;
1632*270aa010SPeter Xu 
163386039bd3SAndrea Arcangeli 	ret = 0;
163411a9b902SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
163586039bd3SAndrea Arcangeli 		cond_resched();
163686039bd3SAndrea Arcangeli 
163763b2d417SAndrea Arcangeli 		BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
163886039bd3SAndrea Arcangeli 
163986039bd3SAndrea Arcangeli 		/*
164086039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is not registered with any
164186039bd3SAndrea Arcangeli 		 * userfaultfd, so there is nothing to unregister.
164286039bd3SAndrea Arcangeli 		 */
164386039bd3SAndrea Arcangeli 		if (!vma->vm_userfaultfd_ctx.ctx)
164486039bd3SAndrea Arcangeli 			goto skip;
164586039bd3SAndrea Arcangeli 
164601e881f5SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
164701e881f5SAndrea Arcangeli 
164886039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
164986039bd3SAndrea Arcangeli 			start = vma->vm_start;
165086039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
165186039bd3SAndrea Arcangeli 
165209fa5296SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
165309fa5296SAndrea Arcangeli 			/*
165409fa5296SAndrea Arcangeli 			 * Wake any concurrent pending userfaults while
165509fa5296SAndrea Arcangeli 			 * we unregister, so they will not hang
165609fa5296SAndrea Arcangeli 			 * permanently and userland does not have to
165709fa5296SAndrea Arcangeli 			 * call UFFDIO_WAKE explicitly.
165809fa5296SAndrea Arcangeli 			 */
165909fa5296SAndrea Arcangeli 			struct userfaultfd_wake_range range;
166009fa5296SAndrea Arcangeli 			range.start = start;
166109fa5296SAndrea Arcangeli 			range.len = vma_end - start;
166209fa5296SAndrea Arcangeli 			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
166309fa5296SAndrea Arcangeli 		}
166409fa5296SAndrea Arcangeli 
1665f369b07cSPeter Xu 		/* Reset ptes for the whole vma range if wr-protected */
1666f369b07cSPeter Xu 		if (userfaultfd_wp(vma))
166761c50040SAxel Rasmussen 			uffd_wp_range(vma, start, vma_end - start, false);
1668f369b07cSPeter Xu 
16697677f7fdSAxel Rasmussen 		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
16709760ebffSLiam R. Howlett 		prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
167186039bd3SAndrea Arcangeli 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
167286039bd3SAndrea Arcangeli 				 vma_policy(vma),
16735c26f6acSSuren Baghdasaryan 				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
167486039bd3SAndrea Arcangeli 		if (prev) {
167586039bd3SAndrea Arcangeli 			vma = prev;
167686039bd3SAndrea Arcangeli 			goto next;
167786039bd3SAndrea Arcangeli 		}
167886039bd3SAndrea Arcangeli 		if (vma->vm_start < start) {
16799760ebffSLiam R. Howlett 			ret = split_vma(&vmi, vma, start, 1);
168086039bd3SAndrea Arcangeli 			if (ret)
168186039bd3SAndrea Arcangeli 				break;
168286039bd3SAndrea Arcangeli 		}
168386039bd3SAndrea Arcangeli 		if (vma->vm_end > end) {
16849760ebffSLiam R. Howlett 			ret = split_vma(&vmi, vma, end, 0);
168586039bd3SAndrea Arcangeli 			if (ret)
168686039bd3SAndrea Arcangeli 				break;
168786039bd3SAndrea Arcangeli 		}
168886039bd3SAndrea Arcangeli 	next:
168986039bd3SAndrea Arcangeli 		/*
169086039bd3SAndrea Arcangeli 		 * In the vma_merge() successful mprotect-like case 8:
169186039bd3SAndrea Arcangeli 		 * the next vma was merged into the current one and
169286039bd3SAndrea Arcangeli 		 * the current one has not been updated yet.
169386039bd3SAndrea Arcangeli 		 */
169451d3d5ebSDavid Hildenbrand 		userfaultfd_set_vm_flags(vma, new_flags);
169586039bd3SAndrea Arcangeli 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
169686039bd3SAndrea Arcangeli 
169786039bd3SAndrea Arcangeli 	skip:
169886039bd3SAndrea Arcangeli 		prev = vma;
169986039bd3SAndrea Arcangeli 		start = vma->vm_end;
170011a9b902SLiam R. Howlett 	}
170111a9b902SLiam R. Howlett 
170286039bd3SAndrea Arcangeli out_unlock:
1703d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1704d2005e3fSOleg Nesterov 	mmput(mm);
170586039bd3SAndrea Arcangeli out:
170686039bd3SAndrea Arcangeli 	return ret;
170786039bd3SAndrea Arcangeli }
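
/*
 * Illustrative userspace sketch (not part of this file): unregistering
 * takes a bare uffdio_range; any userfault still pending in the range
 * is woken by the code above, so nothing hangs waiting for an
 * UFFDIO_WAKE that would never come.
 *
 *	struct uffdio_range unreg = {
 *		.start = (unsigned long)addr,
 *		.len   = len,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_UNREGISTER, &unreg) == -1)
 *		err(1, "UFFDIO_UNREGISTER");
 */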
170886039bd3SAndrea Arcangeli 
170986039bd3SAndrea Arcangeli /*
1710ba85c702SAndrea Arcangeli  * userfaultfd_wake may be used in combination with the
1711ba85c702SAndrea Arcangeli  * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches.
171286039bd3SAndrea Arcangeli  */
171386039bd3SAndrea Arcangeli static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
171486039bd3SAndrea Arcangeli 			    unsigned long arg)
171586039bd3SAndrea Arcangeli {
171686039bd3SAndrea Arcangeli 	int ret;
171786039bd3SAndrea Arcangeli 	struct uffdio_range uffdio_wake;
171886039bd3SAndrea Arcangeli 	struct userfaultfd_wake_range range;
171986039bd3SAndrea Arcangeli 	const void __user *buf = (void __user *)arg;
172086039bd3SAndrea Arcangeli 
172186039bd3SAndrea Arcangeli 	ret = -EFAULT;
172286039bd3SAndrea Arcangeli 	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
172386039bd3SAndrea Arcangeli 		goto out;
172486039bd3SAndrea Arcangeli 
1725e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
172686039bd3SAndrea Arcangeli 	if (ret)
172786039bd3SAndrea Arcangeli 		goto out;
172886039bd3SAndrea Arcangeli 
172986039bd3SAndrea Arcangeli 	range.start = uffdio_wake.start;
173086039bd3SAndrea Arcangeli 	range.len = uffdio_wake.len;
173186039bd3SAndrea Arcangeli 
173286039bd3SAndrea Arcangeli 	/*
173386039bd3SAndrea Arcangeli 	 * len == 0 means wake all and we don't want to wake all here,
173486039bd3SAndrea Arcangeli 	 * so check it again to be sure.
173586039bd3SAndrea Arcangeli 	 */
173686039bd3SAndrea Arcangeli 	VM_BUG_ON(!range.len);
173786039bd3SAndrea Arcangeli 
173886039bd3SAndrea Arcangeli 	wake_userfault(ctx, &range);
173986039bd3SAndrea Arcangeli 	ret = 0;
174086039bd3SAndrea Arcangeli 
174186039bd3SAndrea Arcangeli out:
174286039bd3SAndrea Arcangeli 	return ret;
174386039bd3SAndrea Arcangeli }
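
/*
 * Illustrative userspace sketch (not part of this file): a typical
 * batched resolution resolves several pages with *_MODE_DONTWAKE set
 * and then issues one explicit wake over the whole range.
 *
 *	struct uffdio_range range = {
 *		.start = (unsigned long)addr,
 *		.len   = len,		// must be non-zero
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_WAKE, &range) == -1)
 *		err(1, "UFFDIO_WAKE");
 */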
174486039bd3SAndrea Arcangeli 
1745ad465caeSAndrea Arcangeli static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1746ad465caeSAndrea Arcangeli 			    unsigned long arg)
1747ad465caeSAndrea Arcangeli {
1748ad465caeSAndrea Arcangeli 	__s64 ret;
1749ad465caeSAndrea Arcangeli 	struct uffdio_copy uffdio_copy;
1750ad465caeSAndrea Arcangeli 	struct uffdio_copy __user *user_uffdio_copy;
1751ad465caeSAndrea Arcangeli 	struct userfaultfd_wake_range range;
1752d9712937SAxel Rasmussen 	uffd_flags_t flags = 0;
1753ad465caeSAndrea Arcangeli 
1754ad465caeSAndrea Arcangeli 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
1755ad465caeSAndrea Arcangeli 
1756df2cc96eSMike Rapoport 	ret = -EAGAIN;
1757a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
1758df2cc96eSMike Rapoport 		goto out;
1759df2cc96eSMike Rapoport 
1760ad465caeSAndrea Arcangeli 	ret = -EFAULT;
1761ad465caeSAndrea Arcangeli 	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1762ad465caeSAndrea Arcangeli 			   /* don't copy "copy" last field */
1763ad465caeSAndrea Arcangeli 			   sizeof(uffdio_copy)-sizeof(__s64)))
1764ad465caeSAndrea Arcangeli 		goto out;
1765ad465caeSAndrea Arcangeli 
1766e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1767ad465caeSAndrea Arcangeli 	if (ret)
1768ad465caeSAndrea Arcangeli 		goto out;
1769ad465caeSAndrea Arcangeli 	/*
1770ad465caeSAndrea Arcangeli 	 * double check for wraparound just in case. copy_from_user()
1771ad465caeSAndrea Arcangeli 	 * will later check that uffdio_copy.src + uffdio_copy.len fits
1772ad465caeSAndrea Arcangeli 	 * in the userland range.
1773ad465caeSAndrea Arcangeli 	 */
1774ad465caeSAndrea Arcangeli 	ret = -EINVAL;
1775ad465caeSAndrea Arcangeli 	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
1776ad465caeSAndrea Arcangeli 		goto out;
177772981e0eSAndrea Arcangeli 	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
1778ad465caeSAndrea Arcangeli 		goto out;
1779d9712937SAxel Rasmussen 	if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
1780d9712937SAxel Rasmussen 		flags |= MFILL_ATOMIC_WP;
1781d2005e3fSOleg Nesterov 	if (mmget_not_zero(ctx->mm)) {
1782a734991cSAxel Rasmussen 		ret = mfill_atomic_copy(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
178372981e0eSAndrea Arcangeli 					uffdio_copy.len, &ctx->mmap_changing,
1784d9712937SAxel Rasmussen 					flags);
1785d2005e3fSOleg Nesterov 		mmput(ctx->mm);
178696333187SMike Rapoport 	} else {
1787e86b298bSMike Rapoport 		return -ESRCH;
1788d2005e3fSOleg Nesterov 	}
1789ad465caeSAndrea Arcangeli 	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1790ad465caeSAndrea Arcangeli 		return -EFAULT;
1791ad465caeSAndrea Arcangeli 	if (ret < 0)
1792ad465caeSAndrea Arcangeli 		goto out;
1793ad465caeSAndrea Arcangeli 	BUG_ON(!ret);
1794ad465caeSAndrea Arcangeli 	/* len == 0 would wake all */
1795ad465caeSAndrea Arcangeli 	range.len = ret;
1796ad465caeSAndrea Arcangeli 	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1797ad465caeSAndrea Arcangeli 		range.start = uffdio_copy.dst;
1798ad465caeSAndrea Arcangeli 		wake_userfault(ctx, &range);
1799ad465caeSAndrea Arcangeli 	}
1800ad465caeSAndrea Arcangeli 	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1801ad465caeSAndrea Arcangeli out:
1802ad465caeSAndrea Arcangeli 	return ret;
1803ad465caeSAndrea Arcangeli }
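
/*
 * Illustrative userspace sketch (not part of this file; page_size and
 * src_page are the caller's): resolving a missing fault by copying a
 * prepared page. On EAGAIN the mm changed (or only part of the range
 * was copied) and uffdio_copy.copy holds the bytes actually handled,
 * so the caller simply retries here.
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(page_size - 1),
 *		.src  = (unsigned long)src_page,
 *		.len  = page_size,
 *		.mode = 0,	// or UFFDIO_COPY_MODE_DONTWAKE / _WP
 *	};
 *
 *	while (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
 *		if (errno != EAGAIN)
 *			err(1, "UFFDIO_COPY");
 *	}
 */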
1804ad465caeSAndrea Arcangeli 
1805ad465caeSAndrea Arcangeli static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1806ad465caeSAndrea Arcangeli 				unsigned long arg)
1807ad465caeSAndrea Arcangeli {
1808ad465caeSAndrea Arcangeli 	__s64 ret;
1809ad465caeSAndrea Arcangeli 	struct uffdio_zeropage uffdio_zeropage;
1810ad465caeSAndrea Arcangeli 	struct uffdio_zeropage __user *user_uffdio_zeropage;
1811ad465caeSAndrea Arcangeli 	struct userfaultfd_wake_range range;
1812ad465caeSAndrea Arcangeli 
1813ad465caeSAndrea Arcangeli 	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1814ad465caeSAndrea Arcangeli 
1815df2cc96eSMike Rapoport 	ret = -EAGAIN;
1816a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
1817df2cc96eSMike Rapoport 		goto out;
1818df2cc96eSMike Rapoport 
1819ad465caeSAndrea Arcangeli 	ret = -EFAULT;
1820ad465caeSAndrea Arcangeli 	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1821ad465caeSAndrea Arcangeli 			   /* don't copy "zeropage" last field */
1822ad465caeSAndrea Arcangeli 			   sizeof(uffdio_zeropage)-sizeof(__s64)))
1823ad465caeSAndrea Arcangeli 		goto out;
1824ad465caeSAndrea Arcangeli 
1825e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1826ad465caeSAndrea Arcangeli 			     uffdio_zeropage.range.len);
1827ad465caeSAndrea Arcangeli 	if (ret)
1828ad465caeSAndrea Arcangeli 		goto out;
1829ad465caeSAndrea Arcangeli 	ret = -EINVAL;
1830ad465caeSAndrea Arcangeli 	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1831ad465caeSAndrea Arcangeli 		goto out;
1832ad465caeSAndrea Arcangeli 
1833d2005e3fSOleg Nesterov 	if (mmget_not_zero(ctx->mm)) {
1834a734991cSAxel Rasmussen 		ret = mfill_atomic_zeropage(ctx->mm, uffdio_zeropage.range.start,
1835df2cc96eSMike Rapoport 					   uffdio_zeropage.range.len,
1836df2cc96eSMike Rapoport 					   &ctx->mmap_changing);
1837d2005e3fSOleg Nesterov 		mmput(ctx->mm);
18389d95aa4bSMike Rapoport 	} else {
1839e86b298bSMike Rapoport 		return -ESRCH;
1840d2005e3fSOleg Nesterov 	}
1841ad465caeSAndrea Arcangeli 	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1842ad465caeSAndrea Arcangeli 		return -EFAULT;
1843ad465caeSAndrea Arcangeli 	if (ret < 0)
1844ad465caeSAndrea Arcangeli 		goto out;
1845ad465caeSAndrea Arcangeli 	/* len == 0 would wake all */
1846ad465caeSAndrea Arcangeli 	BUG_ON(!ret);
1847ad465caeSAndrea Arcangeli 	range.len = ret;
1848ad465caeSAndrea Arcangeli 	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1849ad465caeSAndrea Arcangeli 		range.start = uffdio_zeropage.range.start;
1850ad465caeSAndrea Arcangeli 		wake_userfault(ctx, &range);
1851ad465caeSAndrea Arcangeli 	}
1852ad465caeSAndrea Arcangeli 	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1853ad465caeSAndrea Arcangeli out:
1854ad465caeSAndrea Arcangeli 	return ret;
1855ad465caeSAndrea Arcangeli }
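
/*
 * Illustrative userspace sketch (not part of this file): mapping the
 * shared zero page instead of copying when the contents are known to
 * be zero.
 *
 *	struct uffdio_zeropage zp = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len   = page_size },
 *		.mode  = 0,	// or UFFDIO_ZEROPAGE_MODE_DONTWAKE
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *		err(1, "UFFDIO_ZEROPAGE");
 */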
1856ad465caeSAndrea Arcangeli 
185763b2d417SAndrea Arcangeli static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
185863b2d417SAndrea Arcangeli 				    unsigned long arg)
185963b2d417SAndrea Arcangeli {
186063b2d417SAndrea Arcangeli 	int ret;
186163b2d417SAndrea Arcangeli 	struct uffdio_writeprotect uffdio_wp;
186263b2d417SAndrea Arcangeli 	struct uffdio_writeprotect __user *user_uffdio_wp;
186363b2d417SAndrea Arcangeli 	struct userfaultfd_wake_range range;
186423080e27SPeter Xu 	bool mode_wp, mode_dontwake;
186563b2d417SAndrea Arcangeli 
1866a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
186763b2d417SAndrea Arcangeli 		return -EAGAIN;
186863b2d417SAndrea Arcangeli 
186963b2d417SAndrea Arcangeli 	user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
187063b2d417SAndrea Arcangeli 
187163b2d417SAndrea Arcangeli 	if (copy_from_user(&uffdio_wp, user_uffdio_wp,
187263b2d417SAndrea Arcangeli 			   sizeof(struct uffdio_writeprotect)))
187363b2d417SAndrea Arcangeli 		return -EFAULT;
187463b2d417SAndrea Arcangeli 
1875e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_wp.range.start,
187663b2d417SAndrea Arcangeli 			     uffdio_wp.range.len);
187763b2d417SAndrea Arcangeli 	if (ret)
187863b2d417SAndrea Arcangeli 		return ret;
187963b2d417SAndrea Arcangeli 
188063b2d417SAndrea Arcangeli 	if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
188163b2d417SAndrea Arcangeli 			       UFFDIO_WRITEPROTECT_MODE_WP))
188263b2d417SAndrea Arcangeli 		return -EINVAL;
188323080e27SPeter Xu 
188423080e27SPeter Xu 	mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
188523080e27SPeter Xu 	mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
188623080e27SPeter Xu 
188723080e27SPeter Xu 	if (mode_wp && mode_dontwake)
188863b2d417SAndrea Arcangeli 		return -EINVAL;
188963b2d417SAndrea Arcangeli 
1890cb185d5fSNadav Amit 	if (mmget_not_zero(ctx->mm)) {
189163b2d417SAndrea Arcangeli 		ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
189223080e27SPeter Xu 					  uffdio_wp.range.len, mode_wp,
189363b2d417SAndrea Arcangeli 					  &ctx->mmap_changing);
1894cb185d5fSNadav Amit 		mmput(ctx->mm);
1895cb185d5fSNadav Amit 	} else {
1896cb185d5fSNadav Amit 		return -ESRCH;
1897cb185d5fSNadav Amit 	}
1898cb185d5fSNadav Amit 
189963b2d417SAndrea Arcangeli 	if (ret)
190063b2d417SAndrea Arcangeli 		return ret;
190163b2d417SAndrea Arcangeli 
190223080e27SPeter Xu 	if (!mode_wp && !mode_dontwake) {
190363b2d417SAndrea Arcangeli 		range.start = uffdio_wp.range.start;
190463b2d417SAndrea Arcangeli 		range.len = uffdio_wp.range.len;
190563b2d417SAndrea Arcangeli 		wake_userfault(ctx, &range);
190663b2d417SAndrea Arcangeli 	}
190763b2d417SAndrea Arcangeli 	return ret;
190863b2d417SAndrea Arcangeli }
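
/*
 * Illustrative userspace sketch (not part of this file): resolving a
 * write-protect fault. Clearing WP (mode 0) also wakes the faulting
 * thread, which is why WP together with DONTWAKE is rejected above:
 * arming protection must never wake anyone.
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode  = 0,	// clear protection and wake
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *		err(1, "UFFDIO_WRITEPROTECT");
 */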
190963b2d417SAndrea Arcangeli 
1910f6191471SAxel Rasmussen static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1911f6191471SAxel Rasmussen {
1912f6191471SAxel Rasmussen 	__s64 ret;
1913f6191471SAxel Rasmussen 	struct uffdio_continue uffdio_continue;
1914f6191471SAxel Rasmussen 	struct uffdio_continue __user *user_uffdio_continue;
1915f6191471SAxel Rasmussen 	struct userfaultfd_wake_range range;
191602891844SAxel Rasmussen 	uffd_flags_t flags = 0;
1917f6191471SAxel Rasmussen 
1918f6191471SAxel Rasmussen 	user_uffdio_continue = (struct uffdio_continue __user *)arg;
1919f6191471SAxel Rasmussen 
1920f6191471SAxel Rasmussen 	ret = -EAGAIN;
1921a759a909SNadav Amit 	if (atomic_read(&ctx->mmap_changing))
1922f6191471SAxel Rasmussen 		goto out;
1923f6191471SAxel Rasmussen 
1924f6191471SAxel Rasmussen 	ret = -EFAULT;
1925f6191471SAxel Rasmussen 	if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1926f6191471SAxel Rasmussen 			   /* don't copy the output fields */
1927f6191471SAxel Rasmussen 			   sizeof(uffdio_continue) - (sizeof(__s64))))
1928f6191471SAxel Rasmussen 		goto out;
1929f6191471SAxel Rasmussen 
1930e71e2aceSPeter Collingbourne 	ret = validate_range(ctx->mm, uffdio_continue.range.start,
1931f6191471SAxel Rasmussen 			     uffdio_continue.range.len);
1932f6191471SAxel Rasmussen 	if (ret)
1933f6191471SAxel Rasmussen 		goto out;
1934f6191471SAxel Rasmussen 
1935f6191471SAxel Rasmussen 	ret = -EINVAL;
1936f6191471SAxel Rasmussen 	/* double check for wraparound just in case. */
1937f6191471SAxel Rasmussen 	if (uffdio_continue.range.start + uffdio_continue.range.len <=
1938f6191471SAxel Rasmussen 	    uffdio_continue.range.start) {
1939f6191471SAxel Rasmussen 		goto out;
1940f6191471SAxel Rasmussen 	}
194102891844SAxel Rasmussen 	if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE |
194202891844SAxel Rasmussen 				     UFFDIO_CONTINUE_MODE_WP))
1943f6191471SAxel Rasmussen 		goto out;
194402891844SAxel Rasmussen 	if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
194502891844SAxel Rasmussen 		flags |= MFILL_ATOMIC_WP;
1946f6191471SAxel Rasmussen 
1947f6191471SAxel Rasmussen 	if (mmget_not_zero(ctx->mm)) {
1948a734991cSAxel Rasmussen 		ret = mfill_atomic_continue(ctx->mm, uffdio_continue.range.start,
1949f6191471SAxel Rasmussen 					    uffdio_continue.range.len,
195002891844SAxel Rasmussen 					    &ctx->mmap_changing, flags);
1951f6191471SAxel Rasmussen 		mmput(ctx->mm);
1952f6191471SAxel Rasmussen 	} else {
1953f6191471SAxel Rasmussen 		return -ESRCH;
1954f6191471SAxel Rasmussen 	}
1955f6191471SAxel Rasmussen 
1956f6191471SAxel Rasmussen 	if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1957f6191471SAxel Rasmussen 		return -EFAULT;
1958f6191471SAxel Rasmussen 	if (ret < 0)
1959f6191471SAxel Rasmussen 		goto out;
1960f6191471SAxel Rasmussen 
1961f6191471SAxel Rasmussen 	/* len == 0 would wake all */
1962f6191471SAxel Rasmussen 	BUG_ON(!ret);
1963f6191471SAxel Rasmussen 	range.len = ret;
1964f6191471SAxel Rasmussen 	if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1965f6191471SAxel Rasmussen 		range.start = uffdio_continue.range.start;
1966f6191471SAxel Rasmussen 		wake_userfault(ctx, &range);
1967f6191471SAxel Rasmussen 	}
1968f6191471SAxel Rasmussen 	ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1969f6191471SAxel Rasmussen 
1970f6191471SAxel Rasmussen out:
1971f6191471SAxel Rasmussen 	return ret;
1972f6191471SAxel Rasmussen }
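
/*
 * Illustrative userspace sketch (not part of this file): resolving a
 * minor fault once the page cache already holds the right contents,
 * so only the page tables need to be filled in.
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len   = page_size },
 *		.mode  = 0,	// or UFFDIO_CONTINUE_MODE_DONTWAKE / _WP
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
 *		err(1, "UFFDIO_CONTINUE");
 */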
1973f6191471SAxel Rasmussen 
19749cd75c3cSPavel Emelyanov static inline unsigned int uffd_ctx_features(__u64 user_features)
19759cd75c3cSPavel Emelyanov {
19769cd75c3cSPavel Emelyanov 	/*
197722e5fe2aSNadav Amit 	 * For the current set of features the bits just coincide. Set
197822e5fe2aSNadav Amit 	 * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
19799cd75c3cSPavel Emelyanov 	 */
198022e5fe2aSNadav Amit 	return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
19819cd75c3cSPavel Emelyanov }
19829cd75c3cSPavel Emelyanov 
198386039bd3SAndrea Arcangeli /*
198486039bd3SAndrea Arcangeli  * userland asks for a certain API version and we return which bits
198586039bd3SAndrea Arcangeli  * and ioctl commands are implemented in this kernel for that API
198686039bd3SAndrea Arcangeli  * version or -EINVAL if unknown.
198786039bd3SAndrea Arcangeli  */
198886039bd3SAndrea Arcangeli static int userfaultfd_api(struct userfaultfd_ctx *ctx,
198986039bd3SAndrea Arcangeli 			   unsigned long arg)
199086039bd3SAndrea Arcangeli {
199186039bd3SAndrea Arcangeli 	struct uffdio_api uffdio_api;
199286039bd3SAndrea Arcangeli 	void __user *buf = (void __user *)arg;
199322e5fe2aSNadav Amit 	unsigned int ctx_features;
199486039bd3SAndrea Arcangeli 	int ret;
199565603144SAndrea Arcangeli 	__u64 features;
199686039bd3SAndrea Arcangeli 
199786039bd3SAndrea Arcangeli 	ret = -EFAULT;
1998a9b85f94SAndrea Arcangeli 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
199986039bd3SAndrea Arcangeli 		goto out;
20002ff559f3SPeter Xu 	features = uffdio_api.features;
20012ff559f3SPeter Xu 	ret = -EINVAL;
20022ff559f3SPeter Xu 	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
20032ff559f3SPeter Xu 		goto err_out;
20043c1c24d9SMike Rapoport 	ret = -EPERM;
20053c1c24d9SMike Rapoport 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
20063c1c24d9SMike Rapoport 		goto err_out;
200765603144SAndrea Arcangeli 	/* report all available features and ioctls to userland */
200865603144SAndrea Arcangeli 	uffdio_api.features = UFFD_API_FEATURES;
20097677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
2010964ab004SAxel Rasmussen 	uffdio_api.features &=
2011964ab004SAxel Rasmussen 		~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
20127677f7fdSAxel Rasmussen #endif
201300b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
201400b151f2SPeter Xu 	uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
201586039bd3SAndrea Arcangeli #endif
2016b1f9e876SPeter Xu #ifndef CONFIG_PTE_MARKER_UFFD_WP
2017b1f9e876SPeter Xu 	uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
20182bad466cSPeter Xu 	uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
2019b1f9e876SPeter Xu #endif
202086039bd3SAndrea Arcangeli 	uffdio_api.ioctls = UFFD_API_IOCTLS;
202186039bd3SAndrea Arcangeli 	ret = -EFAULT;
202286039bd3SAndrea Arcangeli 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
202386039bd3SAndrea Arcangeli 		goto out;
202422e5fe2aSNadav Amit 
202586039bd3SAndrea Arcangeli 	/* only enable the requested features for this uffd context */
202622e5fe2aSNadav Amit 	ctx_features = uffd_ctx_features(features);
202722e5fe2aSNadav Amit 	ret = -EINVAL;
202822e5fe2aSNadav Amit 	if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
202922e5fe2aSNadav Amit 		goto err_out;
203022e5fe2aSNadav Amit 
203186039bd3SAndrea Arcangeli 	ret = 0;
203286039bd3SAndrea Arcangeli out:
203386039bd3SAndrea Arcangeli 	return ret;
203486039bd3SAndrea Arcangeli err_out:
203586039bd3SAndrea Arcangeli 	memset(&uffdio_api, 0, sizeof(uffdio_api));
203686039bd3SAndrea Arcangeli 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
203786039bd3SAndrea Arcangeli 		ret = -EFAULT;
203886039bd3SAndrea Arcangeli 	goto out;
203986039bd3SAndrea Arcangeli }
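
/*
 * Illustrative userspace sketch (not part of this file): the handshake
 * that must precede every other ioctl (see userfaultfd_ioctl() below).
 * On return the kernel has filled in the supported features/ioctls.
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *
 *	if (uffd == -1)
 *		err(1, "userfaultfd");
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	// api.features / api.ioctls now describe this kernel's support
 */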
204086039bd3SAndrea Arcangeli 
204186039bd3SAndrea Arcangeli static long userfaultfd_ioctl(struct file *file, unsigned cmd,
2042e6485a47SAndrea Arcangeli 			      unsigned long arg)
2043e6485a47SAndrea Arcangeli {
2044e6485a47SAndrea Arcangeli 	int ret = -EINVAL;
204586039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
204686039bd3SAndrea Arcangeli 
204722e5fe2aSNadav Amit 	if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
204886039bd3SAndrea Arcangeli 		return -EINVAL;
204986039bd3SAndrea Arcangeli 
205086039bd3SAndrea Arcangeli 	switch(cmd) {
205186039bd3SAndrea Arcangeli 	case UFFDIO_API:
205286039bd3SAndrea Arcangeli 		ret = userfaultfd_api(ctx, arg);
205386039bd3SAndrea Arcangeli 		break;
205486039bd3SAndrea Arcangeli 	case UFFDIO_REGISTER:
205586039bd3SAndrea Arcangeli 		ret = userfaultfd_register(ctx, arg);
205686039bd3SAndrea Arcangeli 		break;
205786039bd3SAndrea Arcangeli 	case UFFDIO_UNREGISTER:
205886039bd3SAndrea Arcangeli 		ret = userfaultfd_unregister(ctx, arg);
205986039bd3SAndrea Arcangeli 		break;
206086039bd3SAndrea Arcangeli 	case UFFDIO_WAKE:
2061ad465caeSAndrea Arcangeli 		ret = userfaultfd_wake(ctx, arg);
2062ad465caeSAndrea Arcangeli 		break;
2063ad465caeSAndrea Arcangeli 	case UFFDIO_COPY:
2064ad465caeSAndrea Arcangeli 		ret = userfaultfd_copy(ctx, arg);
2065ad465caeSAndrea Arcangeli 		break;
2066ad465caeSAndrea Arcangeli 	case UFFDIO_ZEROPAGE:
206786039bd3SAndrea Arcangeli 		ret = userfaultfd_zeropage(ctx, arg);
206886039bd3SAndrea Arcangeli 		break;
206963b2d417SAndrea Arcangeli 	case UFFDIO_WRITEPROTECT:
207063b2d417SAndrea Arcangeli 		ret = userfaultfd_writeprotect(ctx, arg);
207163b2d417SAndrea Arcangeli 		break;
2072f6191471SAxel Rasmussen 	case UFFDIO_CONTINUE:
2073f6191471SAxel Rasmussen 		ret = userfaultfd_continue(ctx, arg);
2074f6191471SAxel Rasmussen 		break;
207586039bd3SAndrea Arcangeli 	}
207686039bd3SAndrea Arcangeli 	return ret;
207786039bd3SAndrea Arcangeli }
207886039bd3SAndrea Arcangeli 
207986039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS
208086039bd3SAndrea Arcangeli static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
208186039bd3SAndrea Arcangeli {
208286039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = f->private_data;
2083ac6424b9SIngo Molnar 	wait_queue_entry_t *wq;
208486039bd3SAndrea Arcangeli 	unsigned long pending = 0, total = 0;
208586039bd3SAndrea Arcangeli 
2086cbcfa130SEric Biggers 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
20872055da97SIngo Molnar 	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
208886039bd3SAndrea Arcangeli 		pending++;
208986039bd3SAndrea Arcangeli 		total++;
209086039bd3SAndrea Arcangeli 	}
20912055da97SIngo Molnar 	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
209215b726efSAndrea Arcangeli 		total++;
209315b726efSAndrea Arcangeli 	}
2094cbcfa130SEric Biggers 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
209586039bd3SAndrea Arcangeli 
209686039bd3SAndrea Arcangeli 	/*
209786039bd3SAndrea Arcangeli 	 * If more protocols are added in the future, they will all be
209886039bd3SAndrea Arcangeli 	 * shown separated by a space, like this:
209986039bd3SAndrea Arcangeli 	 *	protocols: aa:... bb:...
210086039bd3SAndrea Arcangeli 	 */
210186039bd3SAndrea Arcangeli 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
2102045098e9SMike Rapoport 		   pending, total, UFFD_API, ctx->features,
210386039bd3SAndrea Arcangeli 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
210486039bd3SAndrea Arcangeli }
210586039bd3SAndrea Arcangeli #endif
210686039bd3SAndrea Arcangeli 
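/*
 * For reference, the output above renders in /proc/<pid>/fdinfo/<fd>
 * roughly as follows (the counters and the trailing ioctl mask are
 * illustrative values):
 *
 *	pending:	0
 *	total:	0
 *	API:	aa:0:3f
 *
 * i.e. UFFD_API (0xaa), the negotiated feature mask, then the combined
 * UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS bitmask.
 */
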
210786039bd3SAndrea Arcangeli static const struct file_operations userfaultfd_fops = {
210886039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS
210986039bd3SAndrea Arcangeli 	.show_fdinfo	= userfaultfd_show_fdinfo,
211086039bd3SAndrea Arcangeli #endif
211186039bd3SAndrea Arcangeli 	.release	= userfaultfd_release,
211286039bd3SAndrea Arcangeli 	.poll		= userfaultfd_poll,
211386039bd3SAndrea Arcangeli 	.read		= userfaultfd_read,
211486039bd3SAndrea Arcangeli 	.unlocked_ioctl = userfaultfd_ioctl,
21151832f2d8SArnd Bergmann 	.compat_ioctl	= compat_ptr_ioctl,
211686039bd3SAndrea Arcangeli 	.llseek		= noop_llseek,
211786039bd3SAndrea Arcangeli };
211886039bd3SAndrea Arcangeli 
21193004ec9cSAndrea Arcangeli static void init_once_userfaultfd_ctx(void *mem)
21203004ec9cSAndrea Arcangeli {
21213004ec9cSAndrea Arcangeli 	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
21223004ec9cSAndrea Arcangeli 
21233004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fault_pending_wqh);
21243004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fault_wqh);
21259cd75c3cSPavel Emelyanov 	init_waitqueue_head(&ctx->event_wqh);
21263004ec9cSAndrea Arcangeli 	init_waitqueue_head(&ctx->fd_wqh);
21272ca97ac8SAhmed S. Darwish 	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
21283004ec9cSAndrea Arcangeli }
21293004ec9cSAndrea Arcangeli 
21302d5de004SAxel Rasmussen static int new_userfaultfd(int flags)
213186039bd3SAndrea Arcangeli {
213286039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx;
2133284cd241SEric Biggers 	int fd;
213486039bd3SAndrea Arcangeli 
213586039bd3SAndrea Arcangeli 	BUG_ON(!current->mm);
213686039bd3SAndrea Arcangeli 
213786039bd3SAndrea Arcangeli 	/* Check the UFFD_* constants for consistency.  */
213837cd0575SLokesh Gidra 	BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
213986039bd3SAndrea Arcangeli 	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
214086039bd3SAndrea Arcangeli 	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
214186039bd3SAndrea Arcangeli 
214237cd0575SLokesh Gidra 	if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
2143284cd241SEric Biggers 		return -EINVAL;
214486039bd3SAndrea Arcangeli 
21453004ec9cSAndrea Arcangeli 	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
214686039bd3SAndrea Arcangeli 	if (!ctx)
2147284cd241SEric Biggers 		return -ENOMEM;
214886039bd3SAndrea Arcangeli 
2149ca880420SEric Biggers 	refcount_set(&ctx->refcount, 1);
215086039bd3SAndrea Arcangeli 	ctx->flags = flags;
21519cd75c3cSPavel Emelyanov 	ctx->features = 0;
215286039bd3SAndrea Arcangeli 	ctx->released = false;
2153a759a909SNadav Amit 	atomic_set(&ctx->mmap_changing, 0);
215486039bd3SAndrea Arcangeli 	ctx->mm = current->mm;
215586039bd3SAndrea Arcangeli 	/* prevent the mm struct from being freed */
2156f1f10076SVegard Nossum 	mmgrab(ctx->mm);
215786039bd3SAndrea Arcangeli 
2158b537900fSDaniel Colascione 	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
2159abec3d01SOndrej Mosnacek 			O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
2160284cd241SEric Biggers 	if (fd < 0) {
2161d2005e3fSOleg Nesterov 		mmdrop(ctx->mm);
21623004ec9cSAndrea Arcangeli 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
2163c03e946fSEric Biggers 	}
216486039bd3SAndrea Arcangeli 	return fd;
216586039bd3SAndrea Arcangeli }
21663004ec9cSAndrea Arcangeli 
21672d5de004SAxel Rasmussen static inline bool userfaultfd_syscall_allowed(int flags)
21682d5de004SAxel Rasmussen {
21692d5de004SAxel Rasmussen 	/* Userspace-only page faults are always allowed */
21702d5de004SAxel Rasmussen 	if (flags & UFFD_USER_MODE_ONLY)
21712d5de004SAxel Rasmussen 		return true;
21722d5de004SAxel Rasmussen 
21732d5de004SAxel Rasmussen 	/*
21742d5de004SAxel Rasmussen 	 * The user is requesting a userfaultfd which can handle kernel faults.
21752d5de004SAxel Rasmussen 	 * Privileged users are always allowed to do this.
21762d5de004SAxel Rasmussen 	 */
21772d5de004SAxel Rasmussen 	if (capable(CAP_SYS_PTRACE))
21782d5de004SAxel Rasmussen 		return true;
21792d5de004SAxel Rasmussen 
21802d5de004SAxel Rasmussen 	/* Otherwise, access to kernel fault handling is sysctl controlled. */
21812d5de004SAxel Rasmussen 	return sysctl_unprivileged_userfaultfd;
21822d5de004SAxel Rasmussen }
21832d5de004SAxel Rasmussen 
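/*
 * Illustrative example of the third case above: with the sysctl at its
 * default of 0, "sysctl -w vm.unprivileged_userfaultfd=1" (writing the
 * vm_userfaultfd_table entry registered in userfaultfd_init()) lets
 * unprivileged callers create fds that handle kernel faults as well.
 */
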
21842d5de004SAxel Rasmussen SYSCALL_DEFINE1(userfaultfd, int, flags)
21852d5de004SAxel Rasmussen {
21862d5de004SAxel Rasmussen 	if (!userfaultfd_syscall_allowed(flags))
21872d5de004SAxel Rasmussen 		return -EPERM;
21882d5de004SAxel Rasmussen 
21892d5de004SAxel Rasmussen 	return new_userfaultfd(flags);
21902d5de004SAxel Rasmussen }
21912d5de004SAxel Rasmussen 
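/*
 * A userspace sketch of the syscall path above.  glibc historically
 * provides no wrapper, so syscall(2) is used directly; the helper name
 * is illustrative, the flags are the real uapi ones.
 */
#if 0	/* example only -- would not build inside the kernel */
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

static int uffd_open_syscall(void)
{
	/*
	 * UFFD_USER_MODE_ONLY limits the fd to userspace faults, so
	 * userfaultfd_syscall_allowed() accepts this even without
	 * CAP_SYS_PTRACE or the sysctl being enabled.
	 */
	return syscall(__NR_userfaultfd,
		       O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
}
#endif
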
21922d5de004SAxel Rasmussen static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
21932d5de004SAxel Rasmussen {
21942d5de004SAxel Rasmussen 	if (cmd != USERFAULTFD_IOC_NEW)
21952d5de004SAxel Rasmussen 		return -EINVAL;
21962d5de004SAxel Rasmussen 
21972d5de004SAxel Rasmussen 	return new_userfaultfd(flags);
21982d5de004SAxel Rasmussen }
21992d5de004SAxel Rasmussen 
22002d5de004SAxel Rasmussen static const struct file_operations userfaultfd_dev_fops = {
22012d5de004SAxel Rasmussen 	.unlocked_ioctl = userfaultfd_dev_ioctl,
22022d5de004SAxel Rasmussen 	.compat_ioctl = userfaultfd_dev_ioctl,
22032d5de004SAxel Rasmussen 	.owner = THIS_MODULE,
22042d5de004SAxel Rasmussen 	.llseek = noop_llseek,
22052d5de004SAxel Rasmussen };
22062d5de004SAxel Rasmussen 
22072d5de004SAxel Rasmussen static struct miscdevice userfaultfd_misc = {
22082d5de004SAxel Rasmussen 	.minor = MISC_DYNAMIC_MINOR,
22092d5de004SAxel Rasmussen 	.name = "userfaultfd",
22102d5de004SAxel Rasmussen 	.fops = &userfaultfd_dev_fops
22112d5de004SAxel Rasmussen };
22122d5de004SAxel Rasmussen 
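/*
 * A userspace sketch of the device path defined above and registered
 * in userfaultfd_init() below.  Unlike the syscall, access here is
 * gated only by the permissions on /dev/userfaultfd, not by
 * userfaultfd_syscall_allowed(); the helper name is illustrative.
 */
#if 0	/* example only -- would not build inside the kernel */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int uffd_open_dev(void)
{
	int dev, uffd;

	dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
	if (dev == -1)
		return -1;
	uffd = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);
	close(dev);	/* the device fd is only needed for the ioctl */
	return uffd;
}
#endif
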
22133004ec9cSAndrea Arcangeli static int __init userfaultfd_init(void)
22143004ec9cSAndrea Arcangeli {
22152d5de004SAxel Rasmussen 	int ret;
22162d5de004SAxel Rasmussen 
22172d5de004SAxel Rasmussen 	ret = misc_register(&userfaultfd_misc);
22182d5de004SAxel Rasmussen 	if (ret)
22192d5de004SAxel Rasmussen 		return ret;
22202d5de004SAxel Rasmussen 
22213004ec9cSAndrea Arcangeli 	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
22223004ec9cSAndrea Arcangeli 						sizeof(struct userfaultfd_ctx),
22233004ec9cSAndrea Arcangeli 						0,
22243004ec9cSAndrea Arcangeli 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
22253004ec9cSAndrea Arcangeli 						init_once_userfaultfd_ctx);
22262d337b71SZhangPeng #ifdef CONFIG_SYSCTL
22272d337b71SZhangPeng 	register_sysctl_init("vm", vm_userfaultfd_table);
22282d337b71SZhangPeng #endif
22293004ec9cSAndrea Arcangeli 	return 0;
22303004ec9cSAndrea Arcangeli }
22313004ec9cSAndrea Arcangeli __initcall(userfaultfd_init);
2232