// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>

int sysctl_unprivileged_userfaultfd __read_mostly;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};
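
/*
 * Illustrative sketch of the lock nesting documented above -- not a
 * function that exists in this file, just the documented order spelled
 * out: fd_wqh.lock is outermost and IRQs stay disabled for the whole
 * critical section (see the aio_poll() note above).
 *
 *	spin_lock_irq(&ctx->fd_wqh.lock);
 *	spin_lock(&ctx->fault_pending_wqh.lock);
 *	spin_lock(&ctx->fault_wqh.lock);
 *	...
 *	spin_unlock(&ctx->fault_wqh.lock);
 *	spin_unlock(&ctx->fault_pending_wqh.lock);
 *	spin_unlock_irq(&ctx->fd_wqh.lock);
 */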

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED		(1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
	return ctx->features & UFFD_FEATURE_INITIALIZED;
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
		address &= PAGE_MASK;
	msg.arg.pagefault.address = address;
	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}
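
/*
 * Userspace view of the message built above -- a minimal sketch, assuming
 * a userfaultfd that has already completed the UFFDIO_API handshake.
 * handle_wp(), handle_minor() and handle_missing() are hypothetical
 * callbacks, not part of the kernel API:
 *
 *	struct uffd_msg msg;
 *	if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
 *	    msg.event == UFFD_EVENT_PAGEFAULT) {
 *		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)
 *			handle_wp(msg.arg.pagefault.address);
 *		else if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR)
 *			handle_minor(msg.arg.pagefault.address);
 *		else
 *			handle_missing(msg.arg.pagefault.address);
 *	}
 */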

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_area_struct *vma,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	mmap_assert_locked(mm);

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_area_struct *vma,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (pte_none_mostly(*pte))
		ret = true;
	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_lock and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_lock so we can only check that
	 * the mmap_lock is held, if PF_DUMPCORE was not set.
	 */
	mmap_assert_locked(mm);

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
	    ctx->flags & UFFD_USER_MODE_ONLY) {
		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
			"sysctl knob to 1 if kernel faults must be handled "
			"without obtaining CAP_SYS_PTRACE capability\n");
		goto out;
	}

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without the risk of triggering an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
			ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list_add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	mmap_read_unlock(mm);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(). However, because we never run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self, and list_add
	 * would never let either pointer point to self. So
	 * list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function, and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}
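
/*
 * The userland counterpart of handle_userfault() is a monitor thread
 * that polls the uffd and resolves MISSING faults. A minimal sketch,
 * assuming the range was registered with UFFDIO_REGISTER_MODE_MISSING
 * and that "page" and "page_size" are caller-provided (hypothetical)
 * variables holding the contents to install and the page size:
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	struct uffd_msg msg;
 *	struct uffdio_copy copy;
 *
 *	while (poll(&pfd, 1, -1) > 0 &&
 *	       read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
 *		copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
 *		copy.src = (unsigned long)page;
 *		copy.len = page_size;
 *		copy.mode = 0;
 *		ioctl(uffd, UFFDIO_COPY, &copy);  // wakes the faulting thread
 *	}
 */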

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still points to it */
		mmap_write_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				vma->vm_flags &= ~__VM_UFFD_FLAGS;
			}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	atomic_dec(&ctx->mmap_changing);
	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->features = octx->features;
		ctx->released = false;
		atomic_set(&ctx->mmap_changing, 0);
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		atomic_inc(&octx->mmap_changing);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}
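
/*
 * Sketch of how a monitor consumes the UFFD_EVENT_FORK queued by
 * dup_userfaultfd_complete(): at read time, resolve_userfault_fork()
 * installs a fresh userfaultfd for the child in msg.arg.fork.ufd.
 * This assumes UFFD_FEATURE_EVENT_FORK was negotiated via UFFDIO_API:
 *
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = msg.arg.fork.ufd;
 *		// track child_uffd alongside the parent's descriptor
 *	}
 */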

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	atomic_inc(&ctx->mmap_changing);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
		if (prev)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	if (!userfaultfd_is_initialized(ctx))
		return EPOLLERR;

	/*
	 * poll() never guarantees that read won't block.
	 * userfaults can be woken before they're read().
	 */
	if (unlikely(!(file->f_flags & O_NONBLOCK)))
		return EPOLLERR;
	/*
	 * Lockless access to see if there are pending faults.
	 * __pollwait's last action is the add_wait_queue, but
	 * the spin_unlock would allow the waitqueue_active to
	 * pass above the actual list_add inside the
	 * add_wait_queue critical section. So use a full
	 * memory barrier to serialize the list_add write of
	 * add_wait_queue() with the waitqueue_active read
	 * below.
	 */
	ret = 0;
	smp_mb();
	if (waitqueue_active(&ctx->fault_pending_wqh))
		ret = EPOLLIN;
	else if (waitqueue_active(&ctx->event_wqh))
		ret = EPOLLIN;

	return ret;
}

static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *new,
				  struct inode *inode,
				  struct uffd_msg *msg)
{
	int fd;

	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
			O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
	if (fd < 0)
		return fd;

	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;
	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg, struct inode *inode)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling fork event requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * carefully.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock_irq(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
			/*
			 * Use a seqcount to repeat the lockless check
			 * in wake_userfault() to avoid missing
			 * wakeups because during the refile both
			 * waitqueue could become empty if this is the
			 * only userfault.
			 */
			write_seqcount_begin(&ctx->refile_seq);

			/*
			 * The fault_pending_wqh.lock prevents the uwq
			 * from disappearing from under us.
			 *
			 * Refile this userfault from
			 * fault_pending_wqh to fault_wqh, it's not
			 * pending anymore after we read it.
			 *
			 * Use list_del() by hand (as
			 * userfaultfd_wake_function also uses
			 * list_del_init() by hand) to be sure nobody
			 * changes __remove_wait_queue() to use
			 * list_del_init() in turn breaking the
			 * !list_empty_careful() check in
			 * handle_userfault(). The uwq->wq.head list
			 * must never be empty at any time during the
			 * refile, or the waitqueue could disappear
			 * from under us. The "wait_queue_head_t"
			 * parameter of __remove_wait_queue() is unused
			 * anyway.
			 */
			list_del(&uwq->wq.entry);
			add_wait_queue(&ctx->fault_wqh, &uwq->wq);

			write_seqcount_end(&ctx->refile_seq);

			/* careful to always initialize msg if ret == 0 */
			*msg = uwq->msg;
			spin_unlock(&ctx->fault_pending_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->fault_pending_wqh.lock);

		spin_lock(&ctx->event_wqh.lock);
		uwq = find_userfault_evt(ctx);
		if (uwq) {
			*msg = uwq->msg;

			if (uwq->msg.event == UFFD_EVENT_FORK) {
				fork_nctx = (struct userfaultfd_ctx *)
					(unsigned long)
					uwq->msg.arg.reserved.reserved1;
				list_move(&uwq->wq.entry, &fork_event);
				/*
				 * fork_nctx can be freed as soon as
				 * we drop the lock, unless we take a
				 * reference on it.
				 */
				userfaultfd_ctx_get(fork_nctx);
				spin_unlock(&ctx->event_wqh.lock);
				ret = 0;
				break;
			}

			userfaultfd_event_complete(ctx, uwq);
			spin_unlock(&ctx->event_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->event_wqh.lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (no_wait) {
			ret = -EAGAIN;
			break;
		}
		spin_unlock_irq(&ctx->fd_wqh.lock);
		schedule();
		spin_lock_irq(&ctx->fd_wqh.lock);
	}
	__remove_wait_queue(&ctx->fd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->fd_wqh.lock);

	if (!ret && msg->event == UFFD_EVENT_FORK) {
		ret = resolve_userfault_fork(fork_nctx, inode, msg);
		spin_lock_irq(&ctx->event_wqh.lock);
		if (!list_empty(&fork_event)) {
			/*
			 * The fork thread didn't abort, so we can
			 * drop the temporary refcount.
			 */
			userfaultfd_ctx_put(fork_nctx);

			uwq = list_first_entry(&fork_event,
					       typeof(*uwq),
					       wq.entry);
			/*
			 * If fork_event list wasn't empty and in turn
			 * the event wasn't already released by fork
			 * (the event is allocated on fork kernel
			 * stack), put the event back to its place in
			 * the event_wqh. fork_event head will be freed
			 * as soon as we return so the event cannot
			 * stay queued there no matter the current
			 * "ret" value.
			 */
			list_del(&uwq->wq.entry);
			__add_wait_queue(&ctx->event_wqh, &uwq->wq);

			/*
			 * Leave the event in the waitqueue and report
			 * error to userland if we failed to resolve
			 * the userfault fork.
			 */
			if (likely(!ret))
				userfaultfd_event_complete(ctx, uwq);
		} else {
			/*
			 * Here the fork thread aborted and the
			 * refcount from the fork thread on fork_nctx
			 * has already been released. We still hold
			 * the reference we took before releasing the
			 * lock above. If resolve_userfault_fork
			 * failed we have to drop it, because the
			 * fork_nctx has to be freed in that case. If
			 * it succeeded we'll hold it because the new
			 * uffd references it.
			 */
			if (ret)
				userfaultfd_ctx_put(fork_nctx);
		}
		spin_unlock_irq(&ctx->event_wqh.lock);
	}

	return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	ssize_t _ret, ret = 0;
	struct uffd_msg msg;
	int no_wait = file->f_flags & O_NONBLOCK;
	struct inode *inode = file_inode(file);

	if (!userfaultfd_is_initialized(ctx))
		return -EINVAL;

	for (;;) {
		if (count < sizeof(msg))
			return ret ? ret : -EINVAL;
		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
		if (_ret < 0)
			return ret ? ret : _ret;
		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
			return ret ? ret : -EFAULT;
		ret += sizeof(msg);
		buf += sizeof(msg);
		count -= sizeof(msg);
		/*
		 * Allow reading more than one fault at a time, but only
		 * block while waiting for the very first one.
		 */
		no_wait = O_NONBLOCK;
	}
}

static void __wake_userfault(struct userfaultfd_ctx *ctx,
			     struct userfaultfd_wake_range *range)
{
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/* wake all in the range and autoremove */
	if (waitqueue_active(&ctx->fault_pending_wqh))
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
				     range);
	if (waitqueue_active(&ctx->fault_wqh))
		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
					   struct userfaultfd_wake_range *range)
{
	unsigned seq;
	bool need_wakeup;

	/*
	 * To be sure waitqueue_active() is not reordered by the CPU
	 * before the pagetable update, use an explicit SMP memory
	 * barrier here. PT lock release or mmap_read_unlock(mm) still
	 * have release semantics that can allow the
	 * waitqueue_active() to be reordered before the pte update.
	 */
	smp_mb();

	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we have userfaults to wake.
	 */
	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
			waitqueue_active(&ctx->fault_wqh);
		cond_resched();
	} while (read_seqcount_retry(&ctx->refile_seq, seq));
	if (need_wakeup)
		__wake_userfault(ctx, range);
}

static __always_inline int validate_range(struct mm_struct *mm,
					  __u64 start, __u64 len)
{
	__u64 task_size = mm->task_size;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (len & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (start < mmap_min_addr)
		return -EINVAL;
	if (start >= task_size)
		return -EINVAL;
	if (len > task_size - start)
		return -EINVAL;
	return 0;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
	/* FIXME: add WP support to hugetlbfs and shmem */
	if (vm_flags & VM_UFFD_WP) {
		if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma))
			return false;
	}

	if (vm_flags & VM_UFFD_MINOR) {
		if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma)))
			return false;
	}

	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	       vma_is_shmem(vma);
}

static int userfaultfd_register(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_register uffdio_register;
	struct uffdio_register __user *user_uffdio_register;
	unsigned long vm_flags, new_flags;
	bool found;
	bool basic_ioctls;
	unsigned long start, end, vma_end;

	user_uffdio_register = (struct uffdio_register __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_register, user_uffdio_register,
			   sizeof(uffdio_register)-sizeof(__u64)))
		goto out;

	ret = -EINVAL;
	if (!uffdio_register.mode)
		goto out;
	if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
		goto out;
	vm_flags = 0;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
		vm_flags |= VM_UFFD_MISSING;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
		goto out;
#endif
		vm_flags |= VM_UFFD_WP;
	}
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		goto out;
#endif
		vm_flags |= VM_UFFD_MINOR;
	}

	ret = validate_range(mm, uffdio_register.range.start,
			     uffdio_register.range.len);
	if (ret)
		goto out;

	start = uffdio_register.range.start;
	end = start + uffdio_register.range.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	mmap_write_lock(mm);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for incompatible vmas.
	 */
	found = false;
	basic_ioctls = false;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & __VM_UFFD_FLAGS));

		/* check for incompatible vmas */
		ret = -EINVAL;
		if (!vma_can_userfault(cur, vm_flags))
			goto out_unlock;

		/*
		 * UFFDIO_COPY will fill file holes even without
		 * PROT_WRITE. This check enforces that if this is a
		 * MAP_SHARED, the process has write permission to the backing
		 * file. If VM_MAYWRITE is set it also enforces that on a
		 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
		 * F_WRITE_SEAL can be taken until the vma is destroyed.
		 */
		ret = -EPERM;
		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
			goto out_unlock;

		/*
		 * If this vma contains the ending address and is a
		 * hugetlb vma, check that the end address is aligned
		 * to the huge page size.
		 */
		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
		    end > cur->vm_start) {
			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);

			ret = -EINVAL;

			if (end & (vma_hpagesize - 1))
				goto out_unlock;
		}
		if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
			goto out_unlock;

		/*
		 * Check that this vma isn't already owned by a
		 * different userfaultfd. We can't allow more than one
		 * userfaultfd to own a single vma simultaneously or we
		 * wouldn't know which one to deliver the userfaults to.
		 */
		ret = -EBUSY;
		if (cur->vm_userfaultfd_ctx.ctx &&
		    cur->vm_userfaultfd_ctx.ctx != ctx)
			goto out_unlock;

		/*
		 * Note vmas containing huge pages
		 */
		if (is_vm_hugetlb_page(cur))
			basic_ioctls = true;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vm_flags));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);
		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 ((struct vm_userfaultfd_ctx){ ctx }),
				 anon_vma_name(vma));
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx.ctx = ctx;

		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
			hugetlb_unshare_all_pmds(vma);

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	mmap_write_unlock(mm);
	mmput(mm);
	if (!ret) {
		__u64 ioctls_out;

		ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
		    UFFD_API_RANGE_IOCTLS;

		/*
		 * Declare the WP ioctl only if the WP mode is
		 * specified and all checks passed with the range
		 */
		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
			ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);

		/* CONTINUE ioctl is only supported for MINOR ranges. */
		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
			ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);

		/*
		 * Now that we scanned all vmas we can already tell
		 * userland which ioctls methods are guaranteed to
		 * succeed on this range.
		 */
		if (put_user(ioctls_out, &user_uffdio_register->ioctls))
			ret = -EFAULT;
	}
out:
	return ret;
}
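
/*
 * Userspace counterpart of the ioctl above -- a minimal sketch, assuming
 * "addr" and "len" are page-aligned caller variables and the UFFDIO_API
 * handshake already ran on "uffd":
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == 0 &&
 *	    (reg.ioctls & ((__u64)1 << _UFFDIO_COPY)))
 *		;	// UFFDIO_COPY is guaranteed to work on this range
 */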

static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
				  unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_range uffdio_unregister;
	unsigned long new_flags;
	bool found;
	unsigned long start, end, vma_end;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
		goto out;

	ret = validate_range(mm, uffdio_unregister.start,
			     uffdio_unregister.len);
	if (ret)
		goto out;

	start = uffdio_unregister.start;
	end = start + uffdio_unregister.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	mmap_write_lock(mm);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for incompatible vmas.
	 */
	found = false;
	ret = -EINVAL;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & __VM_UFFD_FLAGS));

		/*
		 * Check for incompatible vmas; not strictly required
		 * here, as incompatible vmas cannot have a
		 * userfaultfd_ctx registered on them, but this
		 * provides stricter behavior to notice
		 * unregistration errors.
		 */
		if (!vma_can_userfault(cur, cur->vm_flags))
			goto out_unlock;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vma->vm_flags));

		/*
		 * Nothing to do: this vma is not registered with any
		 * userfaultfd, so there is nothing to unregister.
		 */
		if (!vma->vm_userfaultfd_ctx.ctx)
			goto skip;

		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		if (userfaultfd_missing(vma)) {
			/*
			 * Wake any concurrent pending userfault while
			 * we unregister, so they will not hang
			 * permanently, and it avoids userland having
			 * to call UFFDIO_WAKE explicitly.
			 */
			struct userfaultfd_wake_range range;
			range.start = start;
			range.len = vma_end - start;
			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
		}

		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	mmap_write_unlock(mm);
	mmput(mm);
out:
	return ret;
}

/*
 * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
 */
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	int ret;
	struct uffdio_range uffdio_wake;
	struct userfaultfd_wake_range range;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
	if (ret)
		goto out;

	range.start = uffdio_wake.start;
	range.len = uffdio_wake.len;

	/*
	 * len == 0 means wake all and we don't want to wake all here,
	 * so check it again to be sure.
	 */
	VM_BUG_ON(!range.len);

	wake_userfault(ctx, &range);
	ret = 0;

out:
	return ret;
}
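
/*
 * Sketch of the batching pattern this enables: resolve several chunks
 * with UFFDIO_COPY_MODE_DONTWAKE, then issue a single UFFDIO_WAKE for
 * the whole region. "dst", "src", "len", "dst_base" and "total_len"
 * are hypothetical caller variables:
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst, .src = src, .len = len,
 *		.mode = UFFDIO_COPY_MODE_DONTWAKE,
 *	};
 *	struct uffdio_range range = { .start = dst_base, .len = total_len };
 *
 *	ioctl(uffd, UFFDIO_COPY, &copy);	// repeat per chunk
 *	ioctl(uffd, UFFDIO_WAKE, &range);	// single wakeup at the end
 */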

static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	__s64 ret;
	struct uffdio_copy uffdio_copy;
	struct uffdio_copy __user *user_uffdio_copy;
	struct userfaultfd_wake_range range;

	user_uffdio_copy = (struct uffdio_copy __user *) arg;

	ret = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
			   /* don't copy "copy" last field */
			   sizeof(uffdio_copy)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
	if (ret)
		goto out;
	/*
	 * double check for wraparound just in case. copy_from_user()
	 * will later check uffdio_copy.src + uffdio_copy.len to fit
	 * in the userland range.
	 */
	ret = -EINVAL;
	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
		goto out;
	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
		goto out;
	if (mmget_not_zero(ctx->mm)) {
		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
				   uffdio_copy.len, &ctx->mmap_changing,
				   uffdio_copy.mode);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	BUG_ON(!ret);
	/* len == 0 would wake all */
	range.len = ret;
	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
		range.start = uffdio_copy.dst;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
	return ret;
}

static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	__s64 ret;
	struct uffdio_zeropage uffdio_zeropage;
	struct uffdio_zeropage __user *user_uffdio_zeropage;
	struct userfaultfd_wake_range range;

	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;

	ret = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
			   /* don't copy "zeropage" last field */
			   sizeof(uffdio_zeropage)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
			     uffdio_zeropage.range.len);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
		goto out;

	if (mmget_not_zero(ctx->mm)) {
		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
				     uffdio_zeropage.range.len,
				     &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	/* len == 0 would wake all */
	BUG_ON(!ret);
	range.len = ret;
	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
		range.start = uffdio_zeropage.range.start;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
	return ret;
}

static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
				    unsigned long arg)
{
	int ret;
	struct uffdio_writeprotect uffdio_wp;
	struct uffdio_writeprotect __user *user_uffdio_wp;
	struct userfaultfd_wake_range range;
	bool mode_wp, mode_dontwake;

	if (atomic_read(&ctx->mmap_changing))
		return -EAGAIN;

	user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;

	if (copy_from_user(&uffdio_wp, user_uffdio_wp,
			   sizeof(struct uffdio_writeprotect)))
		return -EFAULT;

	ret = validate_range(ctx->mm, uffdio_wp.range.start,
			     uffdio_wp.range.len);
	if (ret)
		return ret;

	if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
			       UFFDIO_WRITEPROTECT_MODE_WP))
		return -EINVAL;

	mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
	mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;

	if (mode_wp && mode_dontwake)
		return -EINVAL;

	if (mmget_not_zero(ctx->mm)) {
		ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
					  uffdio_wp.range.len, mode_wp,
					  &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}

	if (ret)
		return ret;

	if (!mode_wp && !mode_dontwake) {
		range.start = uffdio_wp.range.start;
		range.len = uffdio_wp.range.len;
		wake_userfault(ctx, &range);
	}
	return ret;
}
1857 
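/*
 * Illustrative userspace usage of UFFDIO_WRITEPROTECT (a sketch; `addr`
 * and `len` are assumed page-aligned).  Setting _MODE_WP arms write
 * protection; a later call with mode == 0 removes it and, since neither
 * WP nor DONTWAKE is set, also wakes the blocked writer, matching the
 * !mode_wp && !mode_dontwake wakeup above:
 *
 *	struct uffdio_writeprotect wp = {
 *		.range.start = addr,
 *		.range.len = len,
 *		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *		err(1, "UFFDIO_WRITEPROTECT (arm)");
 *
 *	wp.mode = 0;	// un-protect and wake
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *		err(1, "UFFDIO_WRITEPROTECT (disarm)");
 */
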
1858 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1859 {
1860 	__s64 ret;
1861 	struct uffdio_continue uffdio_continue;
1862 	struct uffdio_continue __user *user_uffdio_continue;
1863 	struct userfaultfd_wake_range range;
1864 
1865 	user_uffdio_continue = (struct uffdio_continue __user *)arg;
1866 
1867 	ret = -EAGAIN;
1868 	if (atomic_read(&ctx->mmap_changing))
1869 		goto out;
1870 
1871 	ret = -EFAULT;
1872 	if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1873 			   /* don't copy the output fields */
1874 			   sizeof(uffdio_continue) - (sizeof(__s64))))
1875 		goto out;
1876 
1877 	ret = validate_range(ctx->mm, uffdio_continue.range.start,
1878 			     uffdio_continue.range.len);
1879 	if (ret)
1880 		goto out;
1881 
1882 	ret = -EINVAL;
1883 	/* double check for wraparound just in case. */
1884 	if (uffdio_continue.range.start + uffdio_continue.range.len <=
1885 	    uffdio_continue.range.start) {
1886 		goto out;
1887 	}
1888 	if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE)
1889 		goto out;
1890 
1891 	if (mmget_not_zero(ctx->mm)) {
1892 		ret = mcopy_continue(ctx->mm, uffdio_continue.range.start,
1893 				     uffdio_continue.range.len,
1894 				     &ctx->mmap_changing);
1895 		mmput(ctx->mm);
1896 	} else {
1897 		return -ESRCH;
1898 	}
1899 
1900 	if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1901 		return -EFAULT;
1902 	if (ret < 0)
1903 		goto out;
1904 
1905 	/* len == 0 would wake all */
1906 	BUG_ON(!ret);
1907 	range.len = ret;
1908 	if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1909 		range.start = uffdio_continue.range.start;
1910 		wake_userfault(ctx, &range);
1911 	}
1912 	ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1913 
1914 out:
1915 	return ret;
1916 }
1917 
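/*
 * Illustrative userspace usage of UFFDIO_CONTINUE (a sketch): for a
 * minor fault (UFFD_PAGEFAULT_FLAG_MINOR set in the message) the page
 * contents already exist in the page cache, e.g. populated through a
 * second mapping of the same shmem or hugetlbfs file, so only the page
 * table entry needs installing:
 *
 *	struct uffdio_continue cont = {
 *		.range.start = fault_addr & ~(page_size - 1),
 *		.range.len = page_size,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1 && errno != EAGAIN)
 *		err(1, "UFFDIO_CONTINUE");	// cont.mapped reports progress
 */
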
1918 static inline unsigned int uffd_ctx_features(__u64 user_features)
1919 {
1920 	/*
1921 	 * For the current set of features, the bits just coincide. Set
1922 	 * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
1923 	 */
1924 	return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
1925 }
1926 
1927 /*
1928  * Userland asks for a certain API version, and we return which feature
1929  * bits and ioctl commands are implemented in this kernel for that API
1930  * version, or -EINVAL if the version is unknown.
1931  */
1932 static int userfaultfd_api(struct userfaultfd_ctx *ctx,
1933 			   unsigned long arg)
1934 {
1935 	struct uffdio_api uffdio_api;
1936 	void __user *buf = (void __user *)arg;
1937 	unsigned int ctx_features;
1938 	int ret;
1939 	__u64 features;
1940 
1941 	ret = -EFAULT;
1942 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
1943 		goto out;
1944 	features = uffdio_api.features;
1945 	ret = -EINVAL;
1946 	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
1947 		goto err_out;
1948 	ret = -EPERM;
1949 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
1950 		goto err_out;
1951 	/* report all available features and ioctls to userland */
1952 	uffdio_api.features = UFFD_API_FEATURES;
1953 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1954 	uffdio_api.features &=
1955 		~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
1956 #endif
1957 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1958 	uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
1959 #endif
1960 	uffdio_api.ioctls = UFFD_API_IOCTLS;
1961 	ret = -EFAULT;
1962 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1963 		goto out;
1964 
1965 	/* only enable the requested features for this uffd context */
1966 	ctx_features = uffd_ctx_features(features);
1967 	ret = -EINVAL;
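	/*
	 * cmpxchg() from 0 makes the handshake one-shot: a second or
	 * concurrent UFFDIO_API call finds ->features already non-zero
	 * (UFFD_FEATURE_INITIALIZED is always set) and fails -EINVAL.
	 */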
1968 	if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
1969 		goto err_out;
1970 
1971 	ret = 0;
1972 out:
1973 	return ret;
1974 err_out:
1975 	memset(&uffdio_api, 0, sizeof(uffdio_api));
1976 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1977 		ret = -EFAULT;
1978 	goto out;
1979 }
1980 
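/*
 * Illustrative userspace side of the handshake (a sketch): it must be
 * the first ioctl on a fresh userfaultfd, since the dispatcher below
 * rejects everything else until the context is initialized:
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,	// or a subset the caller wants enabled
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	// api.features and api.ioctls now describe kernel support
 */
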
1981 static long userfaultfd_ioctl(struct file *file, unsigned cmd,
1982 			      unsigned long arg)
1983 {
1984 	int ret = -EINVAL;
1985 	struct userfaultfd_ctx *ctx = file->private_data;
1986 
1987 	if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
1988 		return -EINVAL;
1989 
1990 	switch (cmd) {
1991 	case UFFDIO_API:
1992 		ret = userfaultfd_api(ctx, arg);
1993 		break;
1994 	case UFFDIO_REGISTER:
1995 		ret = userfaultfd_register(ctx, arg);
1996 		break;
1997 	case UFFDIO_UNREGISTER:
1998 		ret = userfaultfd_unregister(ctx, arg);
1999 		break;
2000 	case UFFDIO_WAKE:
2001 		ret = userfaultfd_wake(ctx, arg);
2002 		break;
2003 	case UFFDIO_COPY:
2004 		ret = userfaultfd_copy(ctx, arg);
2005 		break;
2006 	case UFFDIO_ZEROPAGE:
2007 		ret = userfaultfd_zeropage(ctx, arg);
2008 		break;
2009 	case UFFDIO_WRITEPROTECT:
2010 		ret = userfaultfd_writeprotect(ctx, arg);
2011 		break;
2012 	case UFFDIO_CONTINUE:
2013 		ret = userfaultfd_continue(ctx, arg);
2014 		break;
2015 	}
2016 	return ret;
2017 }
2018 
2019 #ifdef CONFIG_PROC_FS
2020 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
2021 {
2022 	struct userfaultfd_ctx *ctx = f->private_data;
2023 	wait_queue_entry_t *wq;
2024 	unsigned long pending = 0, total = 0;
2025 
2026 	spin_lock_irq(&ctx->fault_pending_wqh.lock);
2027 	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
2028 		pending++;
2029 		total++;
2030 	}
2031 	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
2032 		total++;
2033 	}
2034 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
2035 
2036 	/*
2037 	 * If more protocols are added in the future, they will all be
2038 	 * shown separated by a space, like this:
2039 	 *	protocols: aa:... bb:...
2040 	 */
2041 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
2042 		   pending, total, UFFD_API, ctx->features,
2043 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
2044 }
2045 #endif
2046 
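/*
 * Given the format string above, the fdinfo output for a userfaultfd
 * ends with lines shaped like (values illustrative):
 *
 *	pending:	0
 *	total:	0
 *	API:	aa:0:<ioctl mask>
 *
 * UFFD_API prints as "aa", followed by the enabled features and the
 * supported ioctl bitmasks; checkpoint/restore tooling can parse this
 * to recognize userfaultfd descriptors.
 */
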
2047 static const struct file_operations userfaultfd_fops = {
2048 #ifdef CONFIG_PROC_FS
2049 	.show_fdinfo	= userfaultfd_show_fdinfo,
2050 #endif
2051 	.release	= userfaultfd_release,
2052 	.poll		= userfaultfd_poll,
2053 	.read		= userfaultfd_read,
2054 	.unlocked_ioctl = userfaultfd_ioctl,
2055 	.compat_ioctl	= compat_ptr_ioctl,
2056 	.llseek		= noop_llseek,
2057 };
2058 
2059 static void init_once_userfaultfd_ctx(void *mem)
2060 {
2061 	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
2062 
2063 	init_waitqueue_head(&ctx->fault_pending_wqh);
2064 	init_waitqueue_head(&ctx->fault_wqh);
2065 	init_waitqueue_head(&ctx->event_wqh);
2066 	init_waitqueue_head(&ctx->fd_wqh);
2067 	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
2068 }
2069 
2070 SYSCALL_DEFINE1(userfaultfd, int, flags)
2071 {
2072 	struct userfaultfd_ctx *ctx;
2073 	int fd;
2074 
2075 	if (!sysctl_unprivileged_userfaultfd &&
2076 	    (flags & UFFD_USER_MODE_ONLY) == 0 &&
2077 	    !capable(CAP_SYS_PTRACE)) {
2078 		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
2079 			"sysctl knob to 1 if kernel faults must be handled "
2080 			"without obtaining CAP_SYS_PTRACE capability\n");
2081 		return -EPERM;
2082 	}
2083 
2084 	BUG_ON(!current->mm);
2085 
2086 	/* Check the UFFD_* constants for consistency.  */
2087 	BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
2088 	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
2089 	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
2090 
2091 	if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
2092 		return -EINVAL;
2093 
2094 	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
2095 	if (!ctx)
2096 		return -ENOMEM;
2097 
2098 	refcount_set(&ctx->refcount, 1);
2099 	ctx->flags = flags;
2100 	ctx->features = 0;
2101 	ctx->released = false;
2102 	atomic_set(&ctx->mmap_changing, 0);
2103 	ctx->mm = current->mm;
2104 	/* prevent the mm struct to be freed */
2105 	/* prevent the mm struct from being freed */
2106 
2107 	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
2108 			O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
2109 	if (fd < 0) {
2110 		mmdrop(ctx->mm);
2111 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
2112 	}
2113 	return fd;
2114 }
2115 
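/*
 * Illustrative creation from userspace (a sketch; glibc provides no
 * wrapper, so the raw syscall is used).  The BUILD_BUG_ON()s above
 * guarantee the O_* flags can be passed through directly, and
 * UFFD_USER_MODE_ONLY sidesteps the sysctl/CAP_SYS_PTRACE check by
 * limiting the fd to user-mode faults:
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/userfaultfd.h>
 *
 *	int uffd = syscall(__NR_userfaultfd,
 *			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *	if (uffd == -1)
 *		err(1, "userfaultfd");
 */
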
2116 static int __init userfaultfd_init(void)
2117 {
2118 	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
2119 						sizeof(struct userfaultfd_ctx),
2120 						0,
2121 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2122 						init_once_userfaultfd_ctx);
2123 	return 0;
2124 }
2125 __initcall(userfaultfd_init);
2126