// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>

static int sysctl_unprivileged_userfaultfd __read_mostly;

#ifdef CONFIG_SYSCTL
static struct ctl_table vm_userfaultfd_table[] = {
        {
                .procname       = "unprivileged_userfaultfd",
                .data           = &sysctl_unprivileged_userfaultfd,
                .maxlen         = sizeof(sysctl_unprivileged_userfaultfd),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        { }
};
#endif
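
/*
 * Illustrative note (not part of the kernel source): once the table above is
 * registered, the knob is reachable from userspace, e.g.:
 *
 *	# sysctl -w vm.unprivileged_userfaultfd=1
 *	# cat /proc/sys/vm/unprivileged_userfaultfd
 *
 * proc_dointvec_minmax() clamps writes to [SYSCTL_ZERO, SYSCTL_ONE], so any
 * value other than 0 or 1 is rejected with -EINVAL.
 */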

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
        /* waitqueue head for the pending (i.e. not read) userfaults */
        wait_queue_head_t fault_pending_wqh;
        /* waitqueue head for the userfaults */
        wait_queue_head_t fault_wqh;
        /* waitqueue head for the pseudo fd to wakeup poll/read */
        wait_queue_head_t fd_wqh;
        /* waitqueue head for events */
        wait_queue_head_t event_wqh;
        /* a refile sequence protected by fault_pending_wqh lock */
        seqcount_spinlock_t refile_seq;
        /* pseudo fd refcounting */
        refcount_t refcount;
        /* userfaultfd syscall flags */
        unsigned int flags;
        /* features requested from userspace */
        unsigned int features;
        /* released */
        bool released;
        /* memory mappings are changing because of non-cooperative event */
        atomic_t mmap_changing;
        /* mm with one or more vmas attached to this userfaultfd_ctx */
        struct mm_struct *mm;
};
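
/*
 * Illustrative sketch (not part of the kernel source): the context above is
 * refcounted. Any code path that can outlive the pseudo fd pins the context
 * across its sleep, roughly:
 *
 *	userfaultfd_ctx_get(ctx);	// pin before dropping mmap_lock
 *	...				// queue a wait entry, schedule()
 *	userfaultfd_ctx_put(ctx);	// last put frees ctx and mmdrop()s mm
 */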

struct userfaultfd_fork_ctx {
        struct userfaultfd_ctx *orig;
        struct userfaultfd_ctx *new;
        struct list_head list;
};

struct userfaultfd_unmap_ctx {
        struct userfaultfd_ctx *ctx;
        unsigned long start;
        unsigned long end;
        struct list_head list;
};

struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_entry_t wq;
        struct userfaultfd_ctx *ctx;
        bool waken;
};

struct userfaultfd_wake_range {
        unsigned long start;
        unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED		(1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
        return ctx->features & UFFD_FEATURE_INITIALIZED;
}

/*
 * Whether WP_UNPOPULATED is enabled on the uffd context. It is only
 * meaningful when userfaultfd_wp()==true on the vma and when it's
 * anonymous.
 */
bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
        struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

        if (!ctx)
                return false;

        return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
                                     vm_flags_t flags)
{
        const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

        vm_flags_reset(vma, flags);
        /*
         * For shared mappings, we want to enable writenotify while
         * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
         * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
         */
        if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
                vma_set_page_prot(vma);
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
                                     int wake_flags, void *key)
{
        struct userfaultfd_wake_range *range = key;
        int ret;
        struct userfaultfd_wait_queue *uwq;
        unsigned long start, len;

        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
        ret = 0;
        /* len == 0 means wake all */
        start = range->start;
        len = range->len;
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
        WRITE_ONCE(uwq->waken, true);
        /*
         * The Program-Order guarantees provided by the scheduler
         * ensure uwq->waken is visible before the task is woken.
         */
        ret = wake_up_state(wq->private, mode);
        if (ret) {
                /*
                 * Wake only once, autoremove behavior.
                 *
                 * After the effect of list_del_init is visible to the other
                 * CPUs, the waitqueue may disappear from under us, see the
                 * !list_empty_careful() in handle_userfault().
                 *
                 * try_to_wake_up() has an implicit smp_mb(), and the
                 * wq->private is read before calling the extern function
                 * "wake_up_state" (which in turn calls try_to_wake_up).
                 */
                list_del_init(&wq->entry);
        }
out:
        return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
        refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
        if (refcount_dec_and_test(&ctx->refcount)) {
                VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
                mmdrop(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
}

static inline void msg_init(struct uffd_msg *msg)
{
        BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
        /*
         * Must use memset to zero out the padding, or kernel data is
         * leaked to userland.
         */
        memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
                                            unsigned long real_address,
                                            unsigned int flags,
                                            unsigned long reason,
                                            unsigned int features)
{
        struct uffd_msg msg;

        msg_init(&msg);
        msg.event = UFFD_EVENT_PAGEFAULT;

        msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
                                    real_address : address;

        /*
         * These flags indicate why the userfault occurred:
         * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
         * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
         * - Neither of these flags being set indicates a MISSING fault.
         *
         * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
         * fault. Otherwise, it was a read fault.
         */
        if (flags & FAULT_FLAG_WRITE)
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
        if (reason & VM_UFFD_WP)
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
        if (reason & VM_UFFD_MINOR)
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
        if (features & UFFD_FEATURE_THREAD_ID)
                msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
        return msg;
}
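
/*
 * Illustrative sketch (not part of the kernel source): a monitor that
 * read()s the pseudo fd receives the message built above and can decode it
 * roughly as follows (error handling elided):
 *
 *	struct uffd_msg msg;
 *
 *	read(uffd, &msg, sizeof(msg));
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		__u64 flags = msg.arg.pagefault.flags;
 *		bool wp = flags & UFFD_PAGEFAULT_FLAG_WP;
 *		bool minor = flags & UFFD_PAGEFAULT_FLAG_MINOR;
 *		// neither WP nor MINOR set: a MISSING fault at
 *		// msg.arg.pagefault.address
 *	}
 */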

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                              struct vm_fault *vmf,
                                              unsigned long reason)
{
        struct vm_area_struct *vma = vmf->vma;
        pte_t *ptep, pte;
        bool ret = true;

        assert_fault_locked(vmf);

        ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
        if (!ptep)
                goto out;

        ret = false;
        pte = huge_ptep_get(ptep);

        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us. PTE markers should be handled the same as none
         * ptes here.
         */
        if (huge_pte_none_mostly(pte))
                ret = true;
        if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                ret = true;
out:
        return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                              struct vm_fault *vmf,
                                              unsigned long reason)
{
        return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
                                         struct vm_fault *vmf,
                                         unsigned long reason)
{
        struct mm_struct *mm = ctx->mm;
        unsigned long address = vmf->address;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
        pte_t ptent;
        bool ret = true;

        assert_fault_locked(vmf);

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;
        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                goto out;
        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto out;
        pmd = pmd_offset(pud, address);
again:
        _pmd = pmdp_get_lockless(pmd);
        if (pmd_none(_pmd))
                goto out;

        ret = false;
        if (!pmd_present(_pmd) || pmd_devmap(_pmd))
                goto out;

        if (pmd_trans_huge(_pmd)) {
                if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
                        ret = true;
                goto out;
        }

        pte = pte_offset_map(pmd, address);
        if (!pte) {
                ret = true;
                goto again;
        }
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us. PTE markers should be handled the same as none
         * ptes here.
         */
        ptent = ptep_get(pte);
        if (pte_none_mostly(ptent))
                ret = true;
        if (!pte_write(ptent) && (reason & VM_UFFD_WP))
                ret = true;
        pte_unmap(pte);

out:
        return ret;
}
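
/*
 * Illustrative note (not part of the kernel source): the re-check above
 * closes a race with a resolver thread. Without it, this interleaving would
 * leave the faulting task asleep forever:
 *
 *	faulting thread				resolver thread
 *	---------------				---------------
 *	takes the fault
 *						UFFDIO_COPY fills the page and
 *						wakes (nobody is queued yet)
 *	queues itself on fault_pending_wqh
 *	sleeps, waiting for a wakeup that
 *	already happened
 *
 * Re-walking the page tables after queueing means the fault either observes
 * the resolved pte (and doesn't sleep) or is queued before the wakeup.
 */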

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
        if (flags & FAULT_FLAG_INTERRUPTIBLE)
                return TASK_INTERRUPTIBLE;

        if (flags & FAULT_FLAG_KILLABLE)
                return TASK_KILLABLE;

        return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
        struct vm_area_struct *vma = vmf->vma;
        struct mm_struct *mm = vma->vm_mm;
        struct userfaultfd_ctx *ctx;
        struct userfaultfd_wait_queue uwq;
        vm_fault_t ret = VM_FAULT_SIGBUS;
        bool must_wait;
        unsigned int blocking_state;

        /*
         * We don't do userfault handling for the final child pid update.
         *
         * We also don't do userfault handling during
         * coredumping. hugetlbfs has the special
         * hugetlb_follow_page_mask() to skip missing pages in the
         * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
         * the no_page_table() helper in follow_page_mask(), but the
         * shmem_vm_ops->fault method is invoked even during
         * coredumping and it ends up here.
         */
        if (current->flags & (PF_EXITING|PF_DUMPCORE))
                goto out;

        assert_fault_locked(vmf);

        ctx = vma->vm_userfaultfd_ctx.ctx;
        if (!ctx)
                goto out;

        BUG_ON(ctx->mm != mm);

        /* Any unrecognized flag is a bug. */
        VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
        /* 0 or > 1 flags set is a bug; we expect exactly 1. */
        VM_BUG_ON(!reason || (reason & (reason - 1)));

        if (ctx->features & UFFD_FEATURE_SIGBUS)
                goto out;
        if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
                goto out;

        /*
         * If it's already released don't get it. This avoids looping
         * in __get_user_pages if userfaultfd_release waits on the
         * caller of handle_userfault to release the mmap_lock.
         */
        if (unlikely(READ_ONCE(ctx->released))) {
                /*
                 * Don't return VM_FAULT_SIGBUS in this case, so a non
                 * cooperative manager can close the uffd after the
                 * last UFFDIO_COPY, without the risk of triggering an
                 * involuntary SIGBUS if the process was starting the
                 * userfaultfd while the userfaultfd was still armed
                 * (but after the last UFFDIO_COPY). If the uffd
                 * wasn't already closed when the userfault reached
                 * this point, that would normally be solved by
                 * userfaultfd_must_wait returning 'false'.
                 *
                 * If we were to return VM_FAULT_SIGBUS here, the non
                 * cooperative manager would instead be forced to
                 * always call UFFDIO_UNREGISTER before it can safely
                 * close the uffd.
                 */
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        /*
         * Check that we can return VM_FAULT_RETRY.
         *
         * NOTE: it should become possible to return VM_FAULT_RETRY
         * even if FAULT_FLAG_TRIED is set without leading to gup()
         * -EBUSY failures, if the userfaultfd is to be extended for
         * VM_UFFD_WP tracking and we intend to arm the userfault
         * without first stopping userland access to the memory. For
         * VM_UFFD_MISSING userfaults this is enough for now.
         */
        if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
                /*
                 * Validate the invariant that nowait must allow retry
                 * to be sure not to return SIGBUS erroneously on
                 * nowait invocations.
                 */
                BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
                if (printk_ratelimit()) {
                        printk(KERN_WARNING
                               "FAULT_FLAG_ALLOW_RETRY missing %x\n",
                               vmf->flags);
                        dump_stack();
                }
#endif
                goto out;
        }

        /*
         * Handle nowait, not much to do other than tell it to retry
         * and wait.
         */
        ret = VM_FAULT_RETRY;
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                goto out;

        /* take the reference before dropping the mmap_lock */
        userfaultfd_ctx_get(ctx);

        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
                                reason, ctx->features);
        uwq.ctx = ctx;
        uwq.waken = false;

        blocking_state = userfaultfd_get_blocking_state(vmf->flags);

        /*
         * Take the vma lock now, in order to safely call
         * userfaultfd_huge_must_wait() later. Since acquiring the
         * (sleepable) vma lock can modify the current task state, that
         * must be before explicitly calling set_current_state().
         */
        if (is_vm_hugetlb_page(vma))
                hugetlb_vma_lock_read(vma);

        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        /*
         * After the __add_wait_queue the uwq is visible to userland
         * through poll/read().
         */
        __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
        /*
         * The smp_mb() after __set_current_state prevents the reads
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
        set_current_state(blocking_state);
        spin_unlock_irq(&ctx->fault_pending_wqh.lock);

        if (!is_vm_hugetlb_page(vma))
                must_wait = userfaultfd_must_wait(ctx, vmf, reason);
        else
                must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason);
        if (is_vm_hugetlb_page(vma))
                hugetlb_vma_unlock_read(vma);
        release_fault_lock(vmf);

        if (likely(must_wait && !READ_ONCE(ctx->released))) {
                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();
        }

        __set_current_state(TASK_RUNNING);

        /*
         * Here we race with the list_del; list_add in
         * userfaultfd_ctx_read(), however because we don't ever run
         * list_del_init() to refile across the two lists, the prev
         * and next pointers will never point to self. list_add also
         * would never let either of the two pointers point to
         * self. So list_empty_careful won't risk seeing both pointers
         * pointing to self at any time during the list refile. The
         * only case where list_del_init() is called is the full
         * removal in the wake function and there we don't re-list_add
         * and it's fine not to block on the spinlock. The uwq on this
         * kernel stack can be released after the list_del_init.
         */
        if (!list_empty_careful(&uwq.wq.entry)) {
                spin_lock_irq(&ctx->fault_pending_wqh.lock);
                /*
                 * No need of list_del_init(), the uwq on the stack
                 * will be freed shortly anyway.
                 */
                list_del(&uwq.wq.entry);
                spin_unlock_irq(&ctx->fault_pending_wqh.lock);
        }

        /*
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
        userfaultfd_ctx_put(ctx);

out:
        return ret;
}
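
/*
 * Illustrative sketch (not part of the kernel source): the function above is
 * one half of a handshake with a userspace monitor. Assuming a MISSING
 * registration and a page-sized buffer "page", the monitor side typically
 * looks like:
 *
 *	struct uffd_msg msg;
 *	struct uffdio_copy copy;
 *
 *	poll(&(struct pollfd){ .fd = uffd, .events = POLLIN }, 1, -1);
 *	read(uffd, &msg, sizeof(msg));		// refiles the fault, see
 *						// userfaultfd_ctx_read()
 *	copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
 *	copy.src = (unsigned long)page;
 *	copy.len = page_size;
 *	copy.mode = 0;
 *	ioctl(uffd, UFFDIO_COPY, &copy);	// fills the page and wakes
 *
 * The UFFDIO_COPY wakeup ends the schedule() above and the faulting task
 * retries the fault, now finding the page present.
 */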

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                                              struct userfaultfd_wait_queue *ewq)
{
        struct userfaultfd_ctx *release_new_ctx;

        if (WARN_ON_ONCE(current->flags & PF_EXITING))
                goto out;

        ewq->ctx = ctx;
        init_waitqueue_entry(&ewq->wq, current);
        release_new_ctx = NULL;

        spin_lock_irq(&ctx->event_wqh.lock);
        /*
         * After the __add_wait_queue the uwq is visible to userland
         * through poll/read().
         */
        __add_wait_queue(&ctx->event_wqh, &ewq->wq);
        for (;;) {
                set_current_state(TASK_KILLABLE);
                if (ewq->msg.event == 0)
                        break;
                if (READ_ONCE(ctx->released) ||
                    fatal_signal_pending(current)) {
                        /*
                         * &ewq->wq may be queued in fork_event, but
                         * __remove_wait_queue ignores the head
                         * parameter. It would be a problem if it
                         * didn't.
                         */
                        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
                        if (ewq->msg.event == UFFD_EVENT_FORK) {
                                struct userfaultfd_ctx *new;

                                new = (struct userfaultfd_ctx *)
                                        (unsigned long)
                                        ewq->msg.arg.reserved.reserved1;
                                release_new_ctx = new;
                        }
                        break;
                }

                spin_unlock_irq(&ctx->event_wqh.lock);

                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();

                spin_lock_irq(&ctx->event_wqh.lock);
        }
        __set_current_state(TASK_RUNNING);
        spin_unlock_irq(&ctx->event_wqh.lock);

        if (release_new_ctx) {
                struct vm_area_struct *vma;
                struct mm_struct *mm = release_new_ctx->mm;
                VMA_ITERATOR(vmi, mm, 0);

                /* the various vma->vm_userfaultfd_ctx still points to it */
                mmap_write_lock(mm);
                for_each_vma(vmi, vma) {
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma_start_write(vma);
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                                userfaultfd_set_vm_flags(vma,
                                                         vma->vm_flags & ~__VM_UFFD_FLAGS);
                        }
                }
                mmap_write_unlock(mm);

                userfaultfd_ctx_put(release_new_ctx);
        }

        /*
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
out:
        atomic_dec(&ctx->mmap_changing);
        VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
        userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
                                       struct userfaultfd_wait_queue *ewq)
{
        ewq->msg.event = 0;
        wake_up_locked(&ctx->event_wqh);
        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
        struct userfaultfd_ctx *ctx = NULL, *octx;
        struct userfaultfd_fork_ctx *fctx;

        octx = vma->vm_userfaultfd_ctx.ctx;
        if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
                vma_start_write(vma);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
                return 0;
        }

        list_for_each_entry(fctx, fcs, list)
                if (fctx->orig == octx) {
                        ctx = fctx->new;
                        break;
                }

        if (!ctx) {
                fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
                if (!fctx)
                        return -ENOMEM;

                ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
                if (!ctx) {
                        kfree(fctx);
                        return -ENOMEM;
                }

                refcount_set(&ctx->refcount, 1);
                ctx->flags = octx->flags;
                ctx->features = octx->features;
                ctx->released = false;
                atomic_set(&ctx->mmap_changing, 0);
                ctx->mm = vma->vm_mm;
                mmgrab(ctx->mm);

                userfaultfd_ctx_get(octx);
                atomic_inc(&octx->mmap_changing);
                fctx->orig = octx;
                fctx->new = ctx;
                list_add_tail(&fctx->list, fcs);
        }

        vma->vm_userfaultfd_ctx.ctx = ctx;
        return 0;
}
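
/*
 * Illustrative note (not part of the kernel source): dup_userfaultfd() above
 * runs once per copied vma while fork() duplicates the address space, under
 * the mmap locks, so it only queues work: at most one userfaultfd_fork_ctx
 * per original context accumulates on the caller's list. The sleepable part,
 * delivering UFFD_EVENT_FORK to the monitor, happens later via
 * dup_userfaultfd_complete() below, after the caller has dropped those locks.
 */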

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
        struct userfaultfd_ctx *ctx = fctx->orig;
        struct userfaultfd_wait_queue ewq;

        msg_init(&ewq.msg);

        ewq.msg.event = UFFD_EVENT_FORK;
        ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

        userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
        struct userfaultfd_fork_ctx *fctx, *n;

        list_for_each_entry_safe(fctx, n, fcs, list) {
                dup_fctx(fctx);
                list_del(&fctx->list);
                kfree(fctx);
        }
}

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
                             struct vm_userfaultfd_ctx *vm_ctx)
{
        struct userfaultfd_ctx *ctx;

        ctx = vma->vm_userfaultfd_ctx.ctx;

        if (!ctx)
                return;

        if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
                vm_ctx->ctx = ctx;
                userfaultfd_ctx_get(ctx);
                atomic_inc(&ctx->mmap_changing);
        } else {
                /* Drop uffd context if remap feature not enabled */
                vma_start_write(vma);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
        }
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
                                 unsigned long from, unsigned long to,
                                 unsigned long len)
{
        struct userfaultfd_ctx *ctx = vm_ctx->ctx;
        struct userfaultfd_wait_queue ewq;

        if (!ctx)
                return;

        if (to & ~PAGE_MASK) {
                userfaultfd_ctx_put(ctx);
                return;
        }

        msg_init(&ewq.msg);

        ewq.msg.event = UFFD_EVENT_REMAP;
        ewq.msg.arg.remap.from = from;
        ewq.msg.arg.remap.to = to;
        ewq.msg.arg.remap.len = len;

        userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        struct userfaultfd_ctx *ctx;
        struct userfaultfd_wait_queue ewq;

        ctx = vma->vm_userfaultfd_ctx.ctx;
        if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
                return true;

        userfaultfd_ctx_get(ctx);
        atomic_inc(&ctx->mmap_changing);
        mmap_read_unlock(mm);

        msg_init(&ewq.msg);

        ewq.msg.event = UFFD_EVENT_REMOVE;
        ewq.msg.arg.remove.start = start;
        ewq.msg.arg.remove.end = end;

        userfaultfd_event_wait_completion(ctx, &ewq);

        return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
                          unsigned long start, unsigned long end)
{
        struct userfaultfd_unmap_ctx *unmap_ctx;

        list_for_each_entry(unmap_ctx, unmaps, list)
                if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
                    unmap_ctx->end == end)
                        return true;

        return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, struct list_head *unmaps)
{
        struct userfaultfd_unmap_ctx *unmap_ctx;
        struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

        if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
            has_unmap_ctx(ctx, unmaps, start, end))
                return 0;

        unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
        if (!unmap_ctx)
                return -ENOMEM;

        userfaultfd_ctx_get(ctx);
        atomic_inc(&ctx->mmap_changing);
        unmap_ctx->ctx = ctx;
        unmap_ctx->start = start;
        unmap_ctx->end = end;
        list_add_tail(&unmap_ctx->list, unmaps);

        return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
        struct userfaultfd_unmap_ctx *ctx, *n;
        struct userfaultfd_wait_queue ewq;

        list_for_each_entry_safe(ctx, n, uf, list) {
                msg_init(&ewq.msg);

                ewq.msg.event = UFFD_EVENT_UNMAP;
                ewq.msg.arg.remove.start = ctx->start;
                ewq.msg.arg.remove.end = ctx->end;

                userfaultfd_event_wait_completion(ctx->ctx, &ewq);

                list_del(&ctx->list);
                kfree(ctx);
        }
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        struct mm_struct *mm = ctx->mm;
        struct vm_area_struct *vma, *prev;
        /* len == 0 means wake all */
        struct userfaultfd_wake_range range = { .len = 0, };
        unsigned long new_flags;
        VMA_ITERATOR(vmi, mm, 0);

        WRITE_ONCE(ctx->released, true);

        if (!mmget_not_zero(mm))
                goto wakeup;

        /*
         * Flush page faults out of all CPUs. NOTE: all page faults
         * must be retried without returning VM_FAULT_SIGBUS if
         * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
         * changes while handle_userfault released the mmap_lock. So
         * it's critical that released is set to true (above), before
         * taking the mmap_lock for writing.
         */
        mmap_write_lock(mm);
        prev = NULL;
        for_each_vma(vmi, vma) {
                cond_resched();
                BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
                       !!(vma->vm_flags & __VM_UFFD_FLAGS));
                if (vma->vm_userfaultfd_ctx.ctx != ctx) {
                        prev = vma;
                        continue;
                }
                /* Reset ptes for the whole vma range if wr-protected */
                if (userfaultfd_wp(vma))
                        uffd_wp_range(vma, vma->vm_start,
                                      vma->vm_end - vma->vm_start, false);
                new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
                prev = vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end,
                                 new_flags, vma->anon_vma,
                                 vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 NULL_VM_UFFD_CTX, anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                } else {
                        prev = vma;
                }

                vma_start_write(vma);
                userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
        mmap_write_unlock(mm);
        mmput(mm);
wakeup:
        /*
         * After no new page faults can wait on this fault_*wqh, flush
         * the last page faults that may have been already waiting on
         * the fault_*wqh.
         */
        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
        __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
        spin_unlock_irq(&ctx->fault_pending_wqh.lock);

        /* Flush pending events that may still wait on event_wqh */
        wake_up_all(&ctx->event_wqh);

        wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
        userfaultfd_ctx_put(ctx);
        return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
                wait_queue_head_t *wqh)
{
        wait_queue_entry_t *wq;
        struct userfaultfd_wait_queue *uwq;

        lockdep_assert_held(&wqh->lock);

        uwq = NULL;
        if (!waitqueue_active(wqh))
                goto out;
        /* walk in reverse to provide FIFO behavior to read userfaults */
        wq = list_last_entry(&wqh->head, typeof(*wq), entry);
        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
        return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
                struct userfaultfd_ctx *ctx)
{
        return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
                struct userfaultfd_ctx *ctx)
{
        return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        __poll_t ret;

        poll_wait(file, &ctx->fd_wqh, wait);

        if (!userfaultfd_is_initialized(ctx))
                return EPOLLERR;

        /*
         * poll() never guarantees that read won't block.
         * userfaults can be woken before they're read().
         */
        if (unlikely(!(file->f_flags & O_NONBLOCK)))
                return EPOLLERR;
        /*
         * Lockless access to see if there are pending faults.
         * __pollwait's last action is the add_wait_queue, but the
         * spin_unlock would allow the waitqueue_active to pass above
         * the actual list_add inside the add_wait_queue critical
         * section. So use a full memory barrier to serialize the
         * list_add write of add_wait_queue() with the
         * waitqueue_active read below.
         */
        ret = 0;
        smp_mb();
        if (waitqueue_active(&ctx->fault_pending_wqh))
                ret = EPOLLIN;
        else if (waitqueue_active(&ctx->event_wqh))
                ret = EPOLLIN;

        return ret;
}
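
/*
 * Illustrative note (not part of the kernel source): the EPOLLERR return for
 * blocking fds above means a userfaultfd must be opened non-blocking to be
 * usable with poll()/epoll(), e.g.:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// POLLIN: a fault or an event is queued
 *
 * A blocking fd remains usable for a monitor that simply read()s.
 */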
103186039bd3SAndrea Arcangeli
1032893e26e6SPavel Emelyanov static const struct file_operations userfaultfd_fops;
1033893e26e6SPavel Emelyanov
resolve_userfault_fork(struct userfaultfd_ctx * new,struct inode * inode,struct uffd_msg * msg)1034b537900fSDaniel Colascione static int resolve_userfault_fork(struct userfaultfd_ctx *new,
1035b537900fSDaniel Colascione struct inode *inode,
1036893e26e6SPavel Emelyanov struct uffd_msg *msg)
1037893e26e6SPavel Emelyanov {
1038893e26e6SPavel Emelyanov int fd;
1039893e26e6SPavel Emelyanov
1040b537900fSDaniel Colascione fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
1041abec3d01SOndrej Mosnacek O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
1042893e26e6SPavel Emelyanov if (fd < 0)
1043893e26e6SPavel Emelyanov return fd;
1044893e26e6SPavel Emelyanov
1045893e26e6SPavel Emelyanov msg->arg.reserved.reserved1 = 0;
1046893e26e6SPavel Emelyanov msg->arg.fork.ufd = fd;
1047893e26e6SPavel Emelyanov return 0;
1048893e26e6SPavel Emelyanov }
1049893e26e6SPavel Emelyanov
105086039bd3SAndrea Arcangeli static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1051b537900fSDaniel Colascione struct uffd_msg *msg, struct inode *inode)
105286039bd3SAndrea Arcangeli {
105386039bd3SAndrea Arcangeli ssize_t ret;
105486039bd3SAndrea Arcangeli DECLARE_WAITQUEUE(wait, current);
105515b726efSAndrea Arcangeli struct userfaultfd_wait_queue *uwq;
1056893e26e6SPavel Emelyanov /*
1057893e26e6SPavel Emelyanov * Handling fork event requires sleeping operations, so
1058893e26e6SPavel Emelyanov * we drop the event_wqh lock, then do these ops, then
1059893e26e6SPavel Emelyanov * lock it back and wake up the waiter. While the lock is
1060893e26e6SPavel Emelyanov * dropped the ewq may go away so we keep track of it
1061893e26e6SPavel Emelyanov * carefully.
1062893e26e6SPavel Emelyanov */
1063893e26e6SPavel Emelyanov LIST_HEAD(fork_event);
1064893e26e6SPavel Emelyanov struct userfaultfd_ctx *fork_nctx = NULL;
106586039bd3SAndrea Arcangeli
106615b726efSAndrea Arcangeli /* always take the fd_wqh lock before the fault_pending_wqh lock */
1067ae62c16eSChristoph Hellwig spin_lock_irq(&ctx->fd_wqh.lock);
106886039bd3SAndrea Arcangeli __add_wait_queue(&ctx->fd_wqh, &wait);
106986039bd3SAndrea Arcangeli for (;;) {
107086039bd3SAndrea Arcangeli set_current_state(TASK_INTERRUPTIBLE);
107115b726efSAndrea Arcangeli spin_lock(&ctx->fault_pending_wqh.lock);
107215b726efSAndrea Arcangeli uwq = find_userfault(ctx);
107315b726efSAndrea Arcangeli if (uwq) {
107486039bd3SAndrea Arcangeli /*
10752c5b7e1bSAndrea Arcangeli * Use a seqcount to repeat the lockless check
10762c5b7e1bSAndrea Arcangeli * in wake_userfault() to avoid missing
10772c5b7e1bSAndrea Arcangeli * wakeups because during the refile both
10782c5b7e1bSAndrea Arcangeli * waitqueue could become empty if this is the
10792c5b7e1bSAndrea Arcangeli * only userfault.
10802c5b7e1bSAndrea Arcangeli */
10812c5b7e1bSAndrea Arcangeli write_seqcount_begin(&ctx->refile_seq);
10822c5b7e1bSAndrea Arcangeli
10832c5b7e1bSAndrea Arcangeli /*
108415b726efSAndrea Arcangeli * The fault_pending_wqh.lock prevents the uwq
108515b726efSAndrea Arcangeli * from disappearing from under us.
108615b726efSAndrea Arcangeli *
108715b726efSAndrea Arcangeli * Refile this userfault from
108815b726efSAndrea Arcangeli * fault_pending_wqh to fault_wqh, it's not
108915b726efSAndrea Arcangeli * pending anymore after we read it.
109015b726efSAndrea Arcangeli *
109115b726efSAndrea Arcangeli * Use list_del() by hand (as
109215b726efSAndrea Arcangeli * userfaultfd_wake_function also uses
109315b726efSAndrea Arcangeli * list_del_init() by hand) to be sure nobody
109415b726efSAndrea Arcangeli * changes __remove_wait_queue() to use
109515b726efSAndrea Arcangeli * list_del_init() in turn breaking the
109615b726efSAndrea Arcangeli * !list_empty_careful() check in
10972055da97SIngo Molnar * handle_userfault(). The uwq->wq.head list
109815b726efSAndrea Arcangeli * must never be empty at any time during the
109915b726efSAndrea Arcangeli * refile, or the waitqueue could disappear
110015b726efSAndrea Arcangeli * from under us. The "wait_queue_head_t"
110115b726efSAndrea Arcangeli * parameter of __remove_wait_queue() is unused
110215b726efSAndrea Arcangeli * anyway.
110386039bd3SAndrea Arcangeli */
11042055da97SIngo Molnar list_del(&uwq->wq.entry);
1105c430d1e8SMatthew Wilcox add_wait_queue(&ctx->fault_wqh, &uwq->wq);
110615b726efSAndrea Arcangeli
11072c5b7e1bSAndrea Arcangeli write_seqcount_end(&ctx->refile_seq);
11082c5b7e1bSAndrea Arcangeli
1109a9b85f94SAndrea Arcangeli /* careful to always initialize msg if ret == 0 */
1110a9b85f94SAndrea Arcangeli *msg = uwq->msg;
111115b726efSAndrea Arcangeli spin_unlock(&ctx->fault_pending_wqh.lock);
111286039bd3SAndrea Arcangeli ret = 0;
111386039bd3SAndrea Arcangeli break;
111486039bd3SAndrea Arcangeli }
111515b726efSAndrea Arcangeli spin_unlock(&ctx->fault_pending_wqh.lock);
11169cd75c3cSPavel Emelyanov
11179cd75c3cSPavel Emelyanov spin_lock(&ctx->event_wqh.lock);
11189cd75c3cSPavel Emelyanov uwq = find_userfault_evt(ctx);
11199cd75c3cSPavel Emelyanov if (uwq) {
11209cd75c3cSPavel Emelyanov *msg = uwq->msg;
11219cd75c3cSPavel Emelyanov
1122893e26e6SPavel Emelyanov if (uwq->msg.event == UFFD_EVENT_FORK) {
1123893e26e6SPavel Emelyanov fork_nctx = (struct userfaultfd_ctx *)
1124893e26e6SPavel Emelyanov (unsigned long)
1125893e26e6SPavel Emelyanov uwq->msg.arg.reserved.reserved1;
11262055da97SIngo Molnar list_move(&uwq->wq.entry, &fork_event);
1127384632e6SAndrea Arcangeli /*
1128384632e6SAndrea Arcangeli * fork_nctx can be freed as soon as
1129384632e6SAndrea Arcangeli * we drop the lock, unless we take a
1130384632e6SAndrea Arcangeli * reference on it.
1131384632e6SAndrea Arcangeli */
1132384632e6SAndrea Arcangeli userfaultfd_ctx_get(fork_nctx);
1133893e26e6SPavel Emelyanov spin_unlock(&ctx->event_wqh.lock);
1134893e26e6SPavel Emelyanov ret = 0;
1135893e26e6SPavel Emelyanov break;
1136893e26e6SPavel Emelyanov }
1137893e26e6SPavel Emelyanov
11389cd75c3cSPavel Emelyanov userfaultfd_event_complete(ctx, uwq);
11399cd75c3cSPavel Emelyanov spin_unlock(&ctx->event_wqh.lock);
11409cd75c3cSPavel Emelyanov ret = 0;
11419cd75c3cSPavel Emelyanov break;
11429cd75c3cSPavel Emelyanov }
11439cd75c3cSPavel Emelyanov spin_unlock(&ctx->event_wqh.lock);
11449cd75c3cSPavel Emelyanov
114586039bd3SAndrea Arcangeli if (signal_pending(current)) {
114686039bd3SAndrea Arcangeli ret = -ERESTARTSYS;
114786039bd3SAndrea Arcangeli break;
114886039bd3SAndrea Arcangeli }
114986039bd3SAndrea Arcangeli if (no_wait) {
115086039bd3SAndrea Arcangeli ret = -EAGAIN;
115186039bd3SAndrea Arcangeli break;
115286039bd3SAndrea Arcangeli }
1153ae62c16eSChristoph Hellwig spin_unlock_irq(&ctx->fd_wqh.lock);
115486039bd3SAndrea Arcangeli schedule();
1155ae62c16eSChristoph Hellwig spin_lock_irq(&ctx->fd_wqh.lock);
115686039bd3SAndrea Arcangeli }
115786039bd3SAndrea Arcangeli __remove_wait_queue(&ctx->fd_wqh, &wait);
115886039bd3SAndrea Arcangeli __set_current_state(TASK_RUNNING);
1159ae62c16eSChristoph Hellwig spin_unlock_irq(&ctx->fd_wqh.lock);
116086039bd3SAndrea Arcangeli
1161893e26e6SPavel Emelyanov if (!ret && msg->event == UFFD_EVENT_FORK) {
1162b537900fSDaniel Colascione ret = resolve_userfault_fork(fork_nctx, inode, msg);
1163cbcfa130SEric Biggers spin_lock_irq(&ctx->event_wqh.lock);
1164893e26e6SPavel Emelyanov if (!list_empty(&fork_event)) {
1165384632e6SAndrea Arcangeli /*
1166384632e6SAndrea Arcangeli * The fork thread didn't abort, so we can
1167384632e6SAndrea Arcangeli * drop the temporary refcount.
1168384632e6SAndrea Arcangeli */
1169384632e6SAndrea Arcangeli userfaultfd_ctx_put(fork_nctx);
1170384632e6SAndrea Arcangeli
1171893e26e6SPavel Emelyanov uwq = list_first_entry(&fork_event,
1172893e26e6SPavel Emelyanov typeof(*uwq),
11732055da97SIngo Molnar wq.entry);
1174384632e6SAndrea Arcangeli /*
1175384632e6SAndrea Arcangeli * If fork_event list wasn't empty and in turn
1176384632e6SAndrea Arcangeli * the event wasn't already released by fork
1177384632e6SAndrea Arcangeli * (the event is allocated on fork kernel
1178384632e6SAndrea Arcangeli * stack), put the event back in its place in
1179384632e6SAndrea Arcangeli * the event_wqh. The fork_event head will be freed
1180384632e6SAndrea Arcangeli * as soon as we return so the event cannot
1181384632e6SAndrea Arcangeli * stay queued there no matter the current
1182384632e6SAndrea Arcangeli * "ret" value.
1183384632e6SAndrea Arcangeli */
11842055da97SIngo Molnar list_del(&uwq->wq.entry);
1185893e26e6SPavel Emelyanov __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1186384632e6SAndrea Arcangeli
1187384632e6SAndrea Arcangeli /*
1188384632e6SAndrea Arcangeli * Leave the event in the waitqueue and report
1189384632e6SAndrea Arcangeli * error to userland if we failed to resolve
1190384632e6SAndrea Arcangeli * the userfault fork.
1191384632e6SAndrea Arcangeli */
1192384632e6SAndrea Arcangeli if (likely(!ret))
1193893e26e6SPavel Emelyanov userfaultfd_event_complete(ctx, uwq);
1194384632e6SAndrea Arcangeli } else {
1195384632e6SAndrea Arcangeli /*
1196384632e6SAndrea Arcangeli * Here the fork thread aborted and the
1197384632e6SAndrea Arcangeli * refcount from the fork thread on fork_nctx
1198384632e6SAndrea Arcangeli * has already been released. We still hold
1199384632e6SAndrea Arcangeli * the reference we took before releasing the
1200384632e6SAndrea Arcangeli * lock above. If resolve_userfault_fork
1201384632e6SAndrea Arcangeli * failed we have to drop it because the
1202384632e6SAndrea Arcangeli * fork_nctx has to be freed in that case. If
1203384632e6SAndrea Arcangeli * it succeeded we'll hold it because the new
1204384632e6SAndrea Arcangeli * uffd references it.
1205384632e6SAndrea Arcangeli */
1206384632e6SAndrea Arcangeli if (ret)
1207384632e6SAndrea Arcangeli userfaultfd_ctx_put(fork_nctx);
1208893e26e6SPavel Emelyanov }
1209cbcfa130SEric Biggers spin_unlock_irq(&ctx->event_wqh.lock);
1210893e26e6SPavel Emelyanov }
1211893e26e6SPavel Emelyanov
121286039bd3SAndrea Arcangeli return ret;
121386039bd3SAndrea Arcangeli }
121486039bd3SAndrea Arcangeli
121586039bd3SAndrea Arcangeli static ssize_t userfaultfd_read(struct file *file, char __user *buf,
121686039bd3SAndrea Arcangeli size_t count, loff_t *ppos)
121786039bd3SAndrea Arcangeli {
121886039bd3SAndrea Arcangeli struct userfaultfd_ctx *ctx = file->private_data;
121986039bd3SAndrea Arcangeli ssize_t _ret, ret = 0;
1220a9b85f94SAndrea Arcangeli struct uffd_msg msg;
122186039bd3SAndrea Arcangeli int no_wait = file->f_flags & O_NONBLOCK;
1222b537900fSDaniel Colascione struct inode *inode = file_inode(file);
122386039bd3SAndrea Arcangeli
122422e5fe2aSNadav Amit if (!userfaultfd_is_initialized(ctx))
122586039bd3SAndrea Arcangeli return -EINVAL;
122686039bd3SAndrea Arcangeli
122786039bd3SAndrea Arcangeli for (;;) {
1228a9b85f94SAndrea Arcangeli if (count < sizeof(msg))
122986039bd3SAndrea Arcangeli return ret ? ret : -EINVAL;
1230b537900fSDaniel Colascione _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
123186039bd3SAndrea Arcangeli if (_ret < 0)
123286039bd3SAndrea Arcangeli return ret ? ret : _ret;
1233a9b85f94SAndrea Arcangeli if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
123486039bd3SAndrea Arcangeli return ret ? ret : -EFAULT;
1235a9b85f94SAndrea Arcangeli ret += sizeof(msg);
1236a9b85f94SAndrea Arcangeli buf += sizeof(msg);
1237a9b85f94SAndrea Arcangeli count -= sizeof(msg);
123886039bd3SAndrea Arcangeli /*
123986039bd3SAndrea Arcangeli * Allow reading more than one fault at a time, but
124086039bd3SAndrea Arcangeli * only block while waiting for the very first one.
124186039bd3SAndrea Arcangeli */
124286039bd3SAndrea Arcangeli no_wait = O_NONBLOCK;
124386039bd3SAndrea Arcangeli }
124486039bd3SAndrea Arcangeli }
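
/*
 * Hypothetical userland sketch (not part of this file): because the
 * loop above keeps copying messages while the buffer has room, a
 * reader can drain several pending faults with a single syscall:
 *
 *	struct uffd_msg msgs[16];
 *	ssize_t i, n;
 *
 *	n = read(uffd, msgs, sizeof(msgs));
 *	for (i = 0; n > 0 && i < n / (ssize_t)sizeof(msgs[0]); i++)
 *		handle(&msgs[i]);	(handle() is a hypothetical helper)
 *
 * Only the wait for the very first message can block; the rest are
 * returned only if already pending.
 */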
124586039bd3SAndrea Arcangeli
124686039bd3SAndrea Arcangeli static void __wake_userfault(struct userfaultfd_ctx *ctx,
124786039bd3SAndrea Arcangeli struct userfaultfd_wake_range *range)
124886039bd3SAndrea Arcangeli {
1249cbcfa130SEric Biggers spin_lock_irq(&ctx->fault_pending_wqh.lock);
125086039bd3SAndrea Arcangeli /* wake all in the range and autoremove */
125115b726efSAndrea Arcangeli if (waitqueue_active(&ctx->fault_pending_wqh))
1252ac5be6b4SAndrea Arcangeli __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
125315b726efSAndrea Arcangeli range);
125415b726efSAndrea Arcangeli if (waitqueue_active(&ctx->fault_wqh))
1255c430d1e8SMatthew Wilcox __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1256cbcfa130SEric Biggers spin_unlock_irq(&ctx->fault_pending_wqh.lock);
125786039bd3SAndrea Arcangeli }
125886039bd3SAndrea Arcangeli
125986039bd3SAndrea Arcangeli static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
126086039bd3SAndrea Arcangeli struct userfaultfd_wake_range *range)
126186039bd3SAndrea Arcangeli {
12622c5b7e1bSAndrea Arcangeli unsigned seq;
12632c5b7e1bSAndrea Arcangeli bool need_wakeup;
12642c5b7e1bSAndrea Arcangeli
126586039bd3SAndrea Arcangeli /*
126686039bd3SAndrea Arcangeli * To be sure waitqueue_active() is not reordered by the CPU
126786039bd3SAndrea Arcangeli * before the pagetable update, use an explicit SMP memory
12683e4e28c5SMichel Lespinasse * barrier here. PT lock release or mmap_read_unlock(mm) still
126986039bd3SAndrea Arcangeli * have release semantics that can allow the
127086039bd3SAndrea Arcangeli * waitqueue_active() to be reordered before the pte update.
127186039bd3SAndrea Arcangeli */
127286039bd3SAndrea Arcangeli smp_mb();
127386039bd3SAndrea Arcangeli
127486039bd3SAndrea Arcangeli /*
127586039bd3SAndrea Arcangeli * Use waitqueue_active because the address space is
127686039bd3SAndrea Arcangeli * changed atomically very frequently even when there
127786039bd3SAndrea Arcangeli * are no userfaults yet. So take the spinlock only
127886039bd3SAndrea Arcangeli * when we're sure we have userfaults to wake.
127986039bd3SAndrea Arcangeli */
12802c5b7e1bSAndrea Arcangeli do {
12812c5b7e1bSAndrea Arcangeli seq = read_seqcount_begin(&ctx->refile_seq);
12822c5b7e1bSAndrea Arcangeli need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
12832c5b7e1bSAndrea Arcangeli waitqueue_active(&ctx->fault_wqh);
12842c5b7e1bSAndrea Arcangeli cond_resched();
12852c5b7e1bSAndrea Arcangeli } while (read_seqcount_retry(&ctx->refile_seq, seq));
12862c5b7e1bSAndrea Arcangeli if (need_wakeup)
128786039bd3SAndrea Arcangeli __wake_userfault(ctx, range);
128886039bd3SAndrea Arcangeli }
128986039bd3SAndrea Arcangeli
12902ef5d724SAxel Rasmussen static __always_inline int validate_unaligned_range(
12912ef5d724SAxel Rasmussen struct mm_struct *mm, __u64 start, __u64 len)
129286039bd3SAndrea Arcangeli {
129386039bd3SAndrea Arcangeli __u64 task_size = mm->task_size;
129486039bd3SAndrea Arcangeli
129586039bd3SAndrea Arcangeli if (len & ~PAGE_MASK)
129686039bd3SAndrea Arcangeli return -EINVAL;
129786039bd3SAndrea Arcangeli if (!len)
129886039bd3SAndrea Arcangeli return -EINVAL;
1299e71e2aceSPeter Collingbourne if (start < mmap_min_addr)
130086039bd3SAndrea Arcangeli return -EINVAL;
1301e71e2aceSPeter Collingbourne if (start >= task_size)
130286039bd3SAndrea Arcangeli return -EINVAL;
1303e71e2aceSPeter Collingbourne if (len > task_size - start)
130486039bd3SAndrea Arcangeli return -EINVAL;
13052ef5d724SAxel Rasmussen if (start + len <= start)
13062ef5d724SAxel Rasmussen return -EINVAL;
130786039bd3SAndrea Arcangeli return 0;
130886039bd3SAndrea Arcangeli }
130986039bd3SAndrea Arcangeli
13102ef5d724SAxel Rasmussen static __always_inline int validate_range(struct mm_struct *mm,
13112ef5d724SAxel Rasmussen __u64 start, __u64 len)
13122ef5d724SAxel Rasmussen {
13132ef5d724SAxel Rasmussen if (start & ~PAGE_MASK)
13142ef5d724SAxel Rasmussen return -EINVAL;
13152ef5d724SAxel Rasmussen
13162ef5d724SAxel Rasmussen return validate_unaligned_range(mm, start, len);
13172ef5d724SAxel Rasmussen }
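
/*
 * Worked example of the two validators above, assuming 4KiB pages:
 * start must be page aligned (validate_range() only), len must be a
 * non-zero multiple of the page size, and [start, start + len) must
 * lie in [mmap_min_addr, mm->task_size) without wrapping. So with
 * mmap_min_addr = 0x10000, start = 0x10000 and len = 0x2000 passes,
 * while start = 0x10234, len = 0, or a start + len that overflows
 * all return -EINVAL.
 */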
13182ef5d724SAxel Rasmussen
131986039bd3SAndrea Arcangeli static int userfaultfd_register(struct userfaultfd_ctx *ctx,
132086039bd3SAndrea Arcangeli unsigned long arg)
132186039bd3SAndrea Arcangeli {
132286039bd3SAndrea Arcangeli struct mm_struct *mm = ctx->mm;
132386039bd3SAndrea Arcangeli struct vm_area_struct *vma, *prev, *cur;
132486039bd3SAndrea Arcangeli int ret;
132586039bd3SAndrea Arcangeli struct uffdio_register uffdio_register;
132686039bd3SAndrea Arcangeli struct uffdio_register __user *user_uffdio_register;
132786039bd3SAndrea Arcangeli unsigned long vm_flags, new_flags;
132886039bd3SAndrea Arcangeli bool found;
1329ce53e8e6SMike Rapoport bool basic_ioctls;
133086039bd3SAndrea Arcangeli unsigned long start, end, vma_end;
133111a9b902SLiam R. Howlett struct vma_iterator vmi;
13325543d3c4SPeter Xu pgoff_t pgoff;
133386039bd3SAndrea Arcangeli
133486039bd3SAndrea Arcangeli user_uffdio_register = (struct uffdio_register __user *) arg;
133586039bd3SAndrea Arcangeli
133686039bd3SAndrea Arcangeli ret = -EFAULT;
133786039bd3SAndrea Arcangeli if (copy_from_user(&uffdio_register, user_uffdio_register,
133886039bd3SAndrea Arcangeli sizeof(uffdio_register)-sizeof(__u64)))
133986039bd3SAndrea Arcangeli goto out;
134086039bd3SAndrea Arcangeli
134186039bd3SAndrea Arcangeli ret = -EINVAL;
134286039bd3SAndrea Arcangeli if (!uffdio_register.mode)
134386039bd3SAndrea Arcangeli goto out;
13447677f7fdSAxel Rasmussen if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
134586039bd3SAndrea Arcangeli goto out;
134686039bd3SAndrea Arcangeli vm_flags = 0;
134786039bd3SAndrea Arcangeli if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
134886039bd3SAndrea Arcangeli vm_flags |= VM_UFFD_MISSING;
134900b151f2SPeter Xu if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
135000b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
135100b151f2SPeter Xu goto out;
135200b151f2SPeter Xu #endif
135386039bd3SAndrea Arcangeli vm_flags |= VM_UFFD_WP;
135400b151f2SPeter Xu }
13557677f7fdSAxel Rasmussen if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
13567677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
13577677f7fdSAxel Rasmussen goto out;
13587677f7fdSAxel Rasmussen #endif
13597677f7fdSAxel Rasmussen vm_flags |= VM_UFFD_MINOR;
13607677f7fdSAxel Rasmussen }
136186039bd3SAndrea Arcangeli
1362e71e2aceSPeter Collingbourne ret = validate_range(mm, uffdio_register.range.start,
136386039bd3SAndrea Arcangeli uffdio_register.range.len);
136486039bd3SAndrea Arcangeli if (ret)
136586039bd3SAndrea Arcangeli goto out;
136686039bd3SAndrea Arcangeli
136786039bd3SAndrea Arcangeli start = uffdio_register.range.start;
136886039bd3SAndrea Arcangeli end = start + uffdio_register.range.len;
136986039bd3SAndrea Arcangeli
1370d2005e3fSOleg Nesterov ret = -ENOMEM;
1371d2005e3fSOleg Nesterov if (!mmget_not_zero(mm))
1372d2005e3fSOleg Nesterov goto out;
1373d2005e3fSOleg Nesterov
137486039bd3SAndrea Arcangeli ret = -EINVAL;
137511a9b902SLiam R. Howlett mmap_write_lock(mm);
137611a9b902SLiam R. Howlett vma_iter_init(&vmi, mm, start);
137711a9b902SLiam R. Howlett vma = vma_find(&vmi, end);
137811a9b902SLiam R. Howlett if (!vma)
137986039bd3SAndrea Arcangeli goto out_unlock;
138086039bd3SAndrea Arcangeli
138186039bd3SAndrea Arcangeli /*
1382cab350afSMike Kravetz * If the first vma contains huge pages, make sure start address
1383cab350afSMike Kravetz * is aligned to huge page size.
1384cab350afSMike Kravetz */
1385cab350afSMike Kravetz if (is_vm_hugetlb_page(vma)) {
1386cab350afSMike Kravetz unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1387cab350afSMike Kravetz
1388cab350afSMike Kravetz if (start & (vma_hpagesize - 1))
1389cab350afSMike Kravetz goto out_unlock;
1390cab350afSMike Kravetz }
1391cab350afSMike Kravetz
1392cab350afSMike Kravetz /*
139386039bd3SAndrea Arcangeli * Search for incompatible vmas.
139486039bd3SAndrea Arcangeli */
139586039bd3SAndrea Arcangeli found = false;
1396ce53e8e6SMike Rapoport basic_ioctls = false;
139711a9b902SLiam R. Howlett cur = vma;
139811a9b902SLiam R. Howlett do {
139986039bd3SAndrea Arcangeli cond_resched();
140086039bd3SAndrea Arcangeli
140186039bd3SAndrea Arcangeli BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
14027677f7fdSAxel Rasmussen !!(cur->vm_flags & __VM_UFFD_FLAGS));
140386039bd3SAndrea Arcangeli
140486039bd3SAndrea Arcangeli /* check for incompatible vmas */
140586039bd3SAndrea Arcangeli ret = -EINVAL;
140663b2d417SAndrea Arcangeli if (!vma_can_userfault(cur, vm_flags))
140786039bd3SAndrea Arcangeli goto out_unlock;
140829ec9066SAndrea Arcangeli
140929ec9066SAndrea Arcangeli /*
141029ec9066SAndrea Arcangeli * UFFDIO_COPY will fill file holes even without
141129ec9066SAndrea Arcangeli * PROT_WRITE. This check enforces that if this is a
141229ec9066SAndrea Arcangeli * MAP_SHARED, the process has write permission to the backing
141329ec9066SAndrea Arcangeli * file. If VM_MAYWRITE is set it also enforces that on a
141429ec9066SAndrea Arcangeli * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
141529ec9066SAndrea Arcangeli * F_WRITE_SEAL can be taken until the vma is destroyed.
141629ec9066SAndrea Arcangeli */
141729ec9066SAndrea Arcangeli ret = -EPERM;
141829ec9066SAndrea Arcangeli if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
141929ec9066SAndrea Arcangeli goto out_unlock;
142029ec9066SAndrea Arcangeli
1421cab350afSMike Kravetz /*
1422cab350afSMike Kravetz * If this vma contains the ending address and is a hugetlb
1423cab350afSMike Kravetz * vma, check that the end is huge page aligned.
1424cab350afSMike Kravetz */
1425cab350afSMike Kravetz if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1426cab350afSMike Kravetz end > cur->vm_start) {
1427cab350afSMike Kravetz unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1428cab350afSMike Kravetz
1429cab350afSMike Kravetz ret = -EINVAL;
1430cab350afSMike Kravetz
1431cab350afSMike Kravetz if (end & (vma_hpagesize - 1))
1432cab350afSMike Kravetz goto out_unlock;
1433cab350afSMike Kravetz }
143463b2d417SAndrea Arcangeli if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
143563b2d417SAndrea Arcangeli goto out_unlock;
143686039bd3SAndrea Arcangeli
143786039bd3SAndrea Arcangeli /*
143886039bd3SAndrea Arcangeli * Check that this vma isn't already owned by a
143986039bd3SAndrea Arcangeli * different userfaultfd. We can't allow more than one
144086039bd3SAndrea Arcangeli * userfaultfd to own a single vma simultaneously or we
144186039bd3SAndrea Arcangeli * wouldn't know which one to deliver the userfaults to.
144286039bd3SAndrea Arcangeli */
144386039bd3SAndrea Arcangeli ret = -EBUSY;
144486039bd3SAndrea Arcangeli if (cur->vm_userfaultfd_ctx.ctx &&
144586039bd3SAndrea Arcangeli cur->vm_userfaultfd_ctx.ctx != ctx)
144686039bd3SAndrea Arcangeli goto out_unlock;
144786039bd3SAndrea Arcangeli
1448cab350afSMike Kravetz /*
1449cab350afSMike Kravetz * Note vmas containing huge pages
1450cab350afSMike Kravetz */
1451ce53e8e6SMike Rapoport if (is_vm_hugetlb_page(cur))
1452ce53e8e6SMike Rapoport basic_ioctls = true;
1453cab350afSMike Kravetz
145486039bd3SAndrea Arcangeli found = true;
145511a9b902SLiam R. Howlett } for_each_vma_range(vmi, cur, end);
145686039bd3SAndrea Arcangeli BUG_ON(!found);
145786039bd3SAndrea Arcangeli
145811a9b902SLiam R. Howlett vma_iter_set(&vmi, start);
145911a9b902SLiam R. Howlett prev = vma_prev(&vmi);
1460270aa010SPeter Xu if (vma->vm_start < start)
1461270aa010SPeter Xu prev = vma;
146286039bd3SAndrea Arcangeli
146386039bd3SAndrea Arcangeli ret = 0;
146411a9b902SLiam R. Howlett for_each_vma_range(vmi, vma, end) {
146586039bd3SAndrea Arcangeli cond_resched();
146686039bd3SAndrea Arcangeli
146763b2d417SAndrea Arcangeli BUG_ON(!vma_can_userfault(vma, vm_flags));
146886039bd3SAndrea Arcangeli BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
146986039bd3SAndrea Arcangeli vma->vm_userfaultfd_ctx.ctx != ctx);
147029ec9066SAndrea Arcangeli WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
147186039bd3SAndrea Arcangeli
147286039bd3SAndrea Arcangeli /*
147386039bd3SAndrea Arcangeli * Nothing to do: this vma is already registered with this
147486039bd3SAndrea Arcangeli * userfaultfd and with the right tracking mode too.
147586039bd3SAndrea Arcangeli */
147686039bd3SAndrea Arcangeli if (vma->vm_userfaultfd_ctx.ctx == ctx &&
147786039bd3SAndrea Arcangeli (vma->vm_flags & vm_flags) == vm_flags)
147886039bd3SAndrea Arcangeli goto skip;
147986039bd3SAndrea Arcangeli
148086039bd3SAndrea Arcangeli if (vma->vm_start > start)
148186039bd3SAndrea Arcangeli start = vma->vm_start;
148286039bd3SAndrea Arcangeli vma_end = min(end, vma->vm_end);
148386039bd3SAndrea Arcangeli
14847677f7fdSAxel Rasmussen new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
14855543d3c4SPeter Xu pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
14869760ebffSLiam R. Howlett prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
14875543d3c4SPeter Xu vma->anon_vma, vma->vm_file, pgoff,
148886039bd3SAndrea Arcangeli vma_policy(vma),
14899a10064fSColin Cross ((struct vm_userfaultfd_ctx){ ctx }),
14905c26f6acSSuren Baghdasaryan anon_vma_name(vma));
149186039bd3SAndrea Arcangeli if (prev) {
149269dbe6daSLiam R. Howlett /* vma_merge() invalidated the mas */
149386039bd3SAndrea Arcangeli vma = prev;
149486039bd3SAndrea Arcangeli goto next;
149586039bd3SAndrea Arcangeli }
149686039bd3SAndrea Arcangeli if (vma->vm_start < start) {
14979760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, start, 1);
149886039bd3SAndrea Arcangeli if (ret)
149986039bd3SAndrea Arcangeli break;
150086039bd3SAndrea Arcangeli }
150186039bd3SAndrea Arcangeli if (vma->vm_end > end) {
15029760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, end, 0);
150386039bd3SAndrea Arcangeli if (ret)
150486039bd3SAndrea Arcangeli break;
150586039bd3SAndrea Arcangeli }
150686039bd3SAndrea Arcangeli next:
150786039bd3SAndrea Arcangeli /*
150886039bd3SAndrea Arcangeli * In the vma_merge() successful mprotect-like case 8:
150986039bd3SAndrea Arcangeli * the next vma was merged into the current one and
151086039bd3SAndrea Arcangeli * the current one has not been updated yet.
151186039bd3SAndrea Arcangeli */
151260081bf1SSuren Baghdasaryan vma_start_write(vma);
151351d3d5ebSDavid Hildenbrand userfaultfd_set_vm_flags(vma, new_flags);
151486039bd3SAndrea Arcangeli vma->vm_userfaultfd_ctx.ctx = ctx;
151586039bd3SAndrea Arcangeli
15166dfeaff9SPeter Xu if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
15176dfeaff9SPeter Xu hugetlb_unshare_all_pmds(vma);
15186dfeaff9SPeter Xu
151986039bd3SAndrea Arcangeli skip:
152086039bd3SAndrea Arcangeli prev = vma;
152186039bd3SAndrea Arcangeli start = vma->vm_end;
152211a9b902SLiam R. Howlett }
152311a9b902SLiam R. Howlett
152486039bd3SAndrea Arcangeli out_unlock:
1525d8ed45c5SMichel Lespinasse mmap_write_unlock(mm);
1526d2005e3fSOleg Nesterov mmput(mm);
152786039bd3SAndrea Arcangeli if (!ret) {
152814819305SPeter Xu __u64 ioctls_out;
152914819305SPeter Xu
153014819305SPeter Xu ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
153114819305SPeter Xu UFFD_API_RANGE_IOCTLS;
153214819305SPeter Xu
153314819305SPeter Xu /*
153414819305SPeter Xu * Declare the WP ioctl only if the WP mode is
153514819305SPeter Xu * specified and all checks passed with the range
153614819305SPeter Xu */
153714819305SPeter Xu if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
153814819305SPeter Xu ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
153914819305SPeter Xu
1540f6191471SAxel Rasmussen /* CONTINUE ioctl is only supported for MINOR ranges. */
1541f6191471SAxel Rasmussen if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1542f6191471SAxel Rasmussen ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1543f6191471SAxel Rasmussen
154486039bd3SAndrea Arcangeli /*
154586039bd3SAndrea Arcangeli * Now that we scanned all vmas we can already tell
154686039bd3SAndrea Arcangeli * userland which ioctl methods are guaranteed to
154786039bd3SAndrea Arcangeli * succeed on this range.
154886039bd3SAndrea Arcangeli */
154914819305SPeter Xu if (put_user(ioctls_out, &user_uffdio_register->ioctls))
155086039bd3SAndrea Arcangeli ret = -EFAULT;
155186039bd3SAndrea Arcangeli }
155286039bd3SAndrea Arcangeli out:
155386039bd3SAndrea Arcangeli return ret;
155486039bd3SAndrea Arcangeli }
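
/*
 * Hypothetical userland sketch (not part of this file): registering a
 * range for missing-page tracking and consulting the ioctls mask that
 * the code above fills in:
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (__u64)(unsigned long)addr, .len = len },
 *		.mode = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		err(1, "UFFDIO_REGISTER");
 *	if (!(reg.ioctls & (1ULL << _UFFDIO_COPY)))
 *		errx(1, "UFFDIO_COPY not guaranteed on this range");
 *
 * addr and len are assumptions set up by the caller and must satisfy
 * validate_range() (plus hugetlb alignment for hugetlb vmas).
 */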
155586039bd3SAndrea Arcangeli
155686039bd3SAndrea Arcangeli static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
155786039bd3SAndrea Arcangeli unsigned long arg)
155886039bd3SAndrea Arcangeli {
155986039bd3SAndrea Arcangeli struct mm_struct *mm = ctx->mm;
156086039bd3SAndrea Arcangeli struct vm_area_struct *vma, *prev, *cur;
156186039bd3SAndrea Arcangeli int ret;
156286039bd3SAndrea Arcangeli struct uffdio_range uffdio_unregister;
156386039bd3SAndrea Arcangeli unsigned long new_flags;
156486039bd3SAndrea Arcangeli bool found;
156586039bd3SAndrea Arcangeli unsigned long start, end, vma_end;
156686039bd3SAndrea Arcangeli const void __user *buf = (void __user *)arg;
156711a9b902SLiam R. Howlett struct vma_iterator vmi;
15685543d3c4SPeter Xu pgoff_t pgoff;
156986039bd3SAndrea Arcangeli
157086039bd3SAndrea Arcangeli ret = -EFAULT;
157186039bd3SAndrea Arcangeli if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
157286039bd3SAndrea Arcangeli goto out;
157386039bd3SAndrea Arcangeli
1574e71e2aceSPeter Collingbourne ret = validate_range(mm, uffdio_unregister.start,
157586039bd3SAndrea Arcangeli uffdio_unregister.len);
157686039bd3SAndrea Arcangeli if (ret)
157786039bd3SAndrea Arcangeli goto out;
157886039bd3SAndrea Arcangeli
157986039bd3SAndrea Arcangeli start = uffdio_unregister.start;
158086039bd3SAndrea Arcangeli end = start + uffdio_unregister.len;
158186039bd3SAndrea Arcangeli
1582d2005e3fSOleg Nesterov ret = -ENOMEM;
1583d2005e3fSOleg Nesterov if (!mmget_not_zero(mm))
1584d2005e3fSOleg Nesterov goto out;
1585d2005e3fSOleg Nesterov
1586d8ed45c5SMichel Lespinasse mmap_write_lock(mm);
158786039bd3SAndrea Arcangeli ret = -EINVAL;
158811a9b902SLiam R. Howlett vma_iter_init(&vmi, mm, start);
158911a9b902SLiam R. Howlett vma = vma_find(&vmi, end);
159011a9b902SLiam R. Howlett if (!vma)
159186039bd3SAndrea Arcangeli goto out_unlock;
159286039bd3SAndrea Arcangeli
159386039bd3SAndrea Arcangeli /*
1594cab350afSMike Kravetz * If the first vma contains huge pages, make sure start address
1595cab350afSMike Kravetz * is aligned to huge page size.
1596cab350afSMike Kravetz */
1597cab350afSMike Kravetz if (is_vm_hugetlb_page(vma)) {
1598cab350afSMike Kravetz unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1599cab350afSMike Kravetz
1600cab350afSMike Kravetz if (start & (vma_hpagesize - 1))
1601cab350afSMike Kravetz goto out_unlock;
1602cab350afSMike Kravetz }
1603cab350afSMike Kravetz
1604cab350afSMike Kravetz /*
160586039bd3SAndrea Arcangeli * Search for incompatible vmas.
160686039bd3SAndrea Arcangeli */
160786039bd3SAndrea Arcangeli found = false;
160811a9b902SLiam R. Howlett cur = vma;
160911a9b902SLiam R. Howlett do {
161086039bd3SAndrea Arcangeli cond_resched();
161186039bd3SAndrea Arcangeli
161286039bd3SAndrea Arcangeli BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
16137677f7fdSAxel Rasmussen !!(cur->vm_flags & __VM_UFFD_FLAGS));
161486039bd3SAndrea Arcangeli
161586039bd3SAndrea Arcangeli /*
161686039bd3SAndrea Arcangeli * Check for incompatible vmas. This is not strictly
161786039bd3SAndrea Arcangeli * required here, as incompatible vmas cannot have a
161886039bd3SAndrea Arcangeli * userfaultfd_ctx registered on them, but it
161986039bd3SAndrea Arcangeli * provides stricter behavior to notice
162086039bd3SAndrea Arcangeli * unregistration errors.
162186039bd3SAndrea Arcangeli */
162263b2d417SAndrea Arcangeli if (!vma_can_userfault(cur, cur->vm_flags))
162386039bd3SAndrea Arcangeli goto out_unlock;
162486039bd3SAndrea Arcangeli
162586039bd3SAndrea Arcangeli found = true;
162611a9b902SLiam R. Howlett } for_each_vma_range(vmi, cur, end);
162786039bd3SAndrea Arcangeli BUG_ON(!found);
162886039bd3SAndrea Arcangeli
162911a9b902SLiam R. Howlett vma_iter_set(&vmi, start);
163011a9b902SLiam R. Howlett prev = vma_prev(&vmi);
1631270aa010SPeter Xu if (vma->vm_start < start)
1632270aa010SPeter Xu prev = vma;
1633270aa010SPeter Xu
163486039bd3SAndrea Arcangeli ret = 0;
163511a9b902SLiam R. Howlett for_each_vma_range(vmi, vma, end) {
163686039bd3SAndrea Arcangeli cond_resched();
163786039bd3SAndrea Arcangeli
163863b2d417SAndrea Arcangeli BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
163986039bd3SAndrea Arcangeli
164086039bd3SAndrea Arcangeli /*
164186039bd3SAndrea Arcangeli * Nothing to do: this vma is not registered with any
164286039bd3SAndrea Arcangeli * userfaultfd, so there is nothing to unregister.
164386039bd3SAndrea Arcangeli */
164486039bd3SAndrea Arcangeli if (!vma->vm_userfaultfd_ctx.ctx)
164586039bd3SAndrea Arcangeli goto skip;
164686039bd3SAndrea Arcangeli
164701e881f5SAndrea Arcangeli WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
164801e881f5SAndrea Arcangeli
164986039bd3SAndrea Arcangeli if (vma->vm_start > start)
165086039bd3SAndrea Arcangeli start = vma->vm_start;
165186039bd3SAndrea Arcangeli vma_end = min(end, vma->vm_end);
165286039bd3SAndrea Arcangeli
165309fa5296SAndrea Arcangeli if (userfaultfd_missing(vma)) {
165409fa5296SAndrea Arcangeli /*
165509fa5296SAndrea Arcangeli * Wake any concurrent pending userfault while
165609fa5296SAndrea Arcangeli * we unregister, so they will not hang
165709fa5296SAndrea Arcangeli * permanently and it saves userland from having to
165809fa5296SAndrea Arcangeli * call UFFDIO_WAKE explicitly.
165909fa5296SAndrea Arcangeli */
166009fa5296SAndrea Arcangeli struct userfaultfd_wake_range range;
166109fa5296SAndrea Arcangeli range.start = start;
166209fa5296SAndrea Arcangeli range.len = vma_end - start;
166309fa5296SAndrea Arcangeli wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
166409fa5296SAndrea Arcangeli }
166509fa5296SAndrea Arcangeli
1666f369b07cSPeter Xu /* Reset ptes for the whole vma range if wr-protected */
1667f369b07cSPeter Xu if (userfaultfd_wp(vma))
166861c50040SAxel Rasmussen uffd_wp_range(vma, start, vma_end - start, false);
1669f369b07cSPeter Xu
16707677f7fdSAxel Rasmussen new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
16715543d3c4SPeter Xu pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
16729760ebffSLiam R. Howlett prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
16735543d3c4SPeter Xu vma->anon_vma, vma->vm_file, pgoff,
167486039bd3SAndrea Arcangeli vma_policy(vma),
16755c26f6acSSuren Baghdasaryan NULL_VM_UFFD_CTX, anon_vma_name(vma));
167686039bd3SAndrea Arcangeli if (prev) {
167786039bd3SAndrea Arcangeli vma = prev;
167886039bd3SAndrea Arcangeli goto next;
167986039bd3SAndrea Arcangeli }
168086039bd3SAndrea Arcangeli if (vma->vm_start < start) {
16819760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, start, 1);
168286039bd3SAndrea Arcangeli if (ret)
168386039bd3SAndrea Arcangeli break;
168486039bd3SAndrea Arcangeli }
168586039bd3SAndrea Arcangeli if (vma->vm_end > end) {
16869760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, end, 0);
168786039bd3SAndrea Arcangeli if (ret)
168886039bd3SAndrea Arcangeli break;
168986039bd3SAndrea Arcangeli }
169086039bd3SAndrea Arcangeli next:
169186039bd3SAndrea Arcangeli /*
169286039bd3SAndrea Arcangeli * In the vma_merge() successful mprotect-like case 8:
169386039bd3SAndrea Arcangeli * the next vma was merged into the current one and
169486039bd3SAndrea Arcangeli * the current one has not been updated yet.
169586039bd3SAndrea Arcangeli */
169660081bf1SSuren Baghdasaryan vma_start_write(vma);
169751d3d5ebSDavid Hildenbrand userfaultfd_set_vm_flags(vma, new_flags);
169886039bd3SAndrea Arcangeli vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
169986039bd3SAndrea Arcangeli
170086039bd3SAndrea Arcangeli skip:
170186039bd3SAndrea Arcangeli prev = vma;
170286039bd3SAndrea Arcangeli start = vma->vm_end;
170311a9b902SLiam R. Howlett }
170411a9b902SLiam R. Howlett
170586039bd3SAndrea Arcangeli out_unlock:
1706d8ed45c5SMichel Lespinasse mmap_write_unlock(mm);
1707d2005e3fSOleg Nesterov mmput(mm);
170886039bd3SAndrea Arcangeli out:
170986039bd3SAndrea Arcangeli return ret;
171086039bd3SAndrea Arcangeli }
171186039bd3SAndrea Arcangeli
171286039bd3SAndrea Arcangeli /*
1713ba85c702SAndrea Arcangeli * userfaultfd_wake may be used in combination with the
1714ba85c702SAndrea Arcangeli * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
171586039bd3SAndrea Arcangeli */
171686039bd3SAndrea Arcangeli static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
171786039bd3SAndrea Arcangeli unsigned long arg)
171886039bd3SAndrea Arcangeli {
171986039bd3SAndrea Arcangeli int ret;
172086039bd3SAndrea Arcangeli struct uffdio_range uffdio_wake;
172186039bd3SAndrea Arcangeli struct userfaultfd_wake_range range;
172286039bd3SAndrea Arcangeli const void __user *buf = (void __user *)arg;
172386039bd3SAndrea Arcangeli
172486039bd3SAndrea Arcangeli ret = -EFAULT;
172586039bd3SAndrea Arcangeli if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
172686039bd3SAndrea Arcangeli goto out;
172786039bd3SAndrea Arcangeli
1728e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
172986039bd3SAndrea Arcangeli if (ret)
173086039bd3SAndrea Arcangeli goto out;
173186039bd3SAndrea Arcangeli
173286039bd3SAndrea Arcangeli range.start = uffdio_wake.start;
173386039bd3SAndrea Arcangeli range.len = uffdio_wake.len;
173486039bd3SAndrea Arcangeli
173586039bd3SAndrea Arcangeli /*
173686039bd3SAndrea Arcangeli * len == 0 means wake all and we don't want to wake all here,
173786039bd3SAndrea Arcangeli * so check it again to be sure.
173886039bd3SAndrea Arcangeli */
173986039bd3SAndrea Arcangeli VM_BUG_ON(!range.len);
174086039bd3SAndrea Arcangeli
174186039bd3SAndrea Arcangeli wake_userfault(ctx, &range);
174286039bd3SAndrea Arcangeli ret = 0;
174386039bd3SAndrea Arcangeli
174486039bd3SAndrea Arcangeli out:
174586039bd3SAndrea Arcangeli return ret;
174686039bd3SAndrea Arcangeli }
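
/*
 * Hypothetical userland sketch (not part of this file): after
 * resolving a batch of faults with the UFFDIO_*_MODE_DONTWAKE flags,
 * wake the whole range with one call:
 *
 *	struct uffdio_range range = {
 *		.start = (__u64)(unsigned long)addr,
 *		.len = len,
 *	};
 *
 *	ioctl(uffd, UFFDIO_WAKE, &range);
 */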
174786039bd3SAndrea Arcangeli
1748ad465caeSAndrea Arcangeli static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1749ad465caeSAndrea Arcangeli unsigned long arg)
1750ad465caeSAndrea Arcangeli {
1751ad465caeSAndrea Arcangeli __s64 ret;
1752ad465caeSAndrea Arcangeli struct uffdio_copy uffdio_copy;
1753ad465caeSAndrea Arcangeli struct uffdio_copy __user *user_uffdio_copy;
1754ad465caeSAndrea Arcangeli struct userfaultfd_wake_range range;
1755d9712937SAxel Rasmussen uffd_flags_t flags = 0;
1756ad465caeSAndrea Arcangeli
1757ad465caeSAndrea Arcangeli user_uffdio_copy = (struct uffdio_copy __user *) arg;
1758ad465caeSAndrea Arcangeli
1759df2cc96eSMike Rapoport ret = -EAGAIN;
1760a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing))
1761df2cc96eSMike Rapoport goto out;
1762df2cc96eSMike Rapoport
1763ad465caeSAndrea Arcangeli ret = -EFAULT;
1764ad465caeSAndrea Arcangeli if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1765ad465caeSAndrea Arcangeli /* don't copy "copy" last field */
1766ad465caeSAndrea Arcangeli sizeof(uffdio_copy)-sizeof(__s64)))
1767ad465caeSAndrea Arcangeli goto out;
1768ad465caeSAndrea Arcangeli
17692ef5d724SAxel Rasmussen ret = validate_unaligned_range(ctx->mm, uffdio_copy.src,
17702ef5d724SAxel Rasmussen uffdio_copy.len);
17712ef5d724SAxel Rasmussen if (ret)
17722ef5d724SAxel Rasmussen goto out;
1773e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1774ad465caeSAndrea Arcangeli if (ret)
1775ad465caeSAndrea Arcangeli goto out;
17762ef5d724SAxel Rasmussen
1777ad465caeSAndrea Arcangeli ret = -EINVAL;
177872981e0eSAndrea Arcangeli if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
1779ad465caeSAndrea Arcangeli goto out;
1780d9712937SAxel Rasmussen if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
1781d9712937SAxel Rasmussen flags |= MFILL_ATOMIC_WP;
1782d2005e3fSOleg Nesterov if (mmget_not_zero(ctx->mm)) {
1783a734991cSAxel Rasmussen ret = mfill_atomic_copy(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
178472981e0eSAndrea Arcangeli uffdio_copy.len, &ctx->mmap_changing,
1785d9712937SAxel Rasmussen flags);
1786d2005e3fSOleg Nesterov mmput(ctx->mm);
178796333187SMike Rapoport } else {
1788e86b298bSMike Rapoport return -ESRCH;
1789d2005e3fSOleg Nesterov }
1790ad465caeSAndrea Arcangeli if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1791ad465caeSAndrea Arcangeli return -EFAULT;
1792ad465caeSAndrea Arcangeli if (ret < 0)
1793ad465caeSAndrea Arcangeli goto out;
1794ad465caeSAndrea Arcangeli BUG_ON(!ret);
1795ad465caeSAndrea Arcangeli /* len == 0 would wake all */
1796ad465caeSAndrea Arcangeli range.len = ret;
1797ad465caeSAndrea Arcangeli if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1798ad465caeSAndrea Arcangeli range.start = uffdio_copy.dst;
1799ad465caeSAndrea Arcangeli wake_userfault(ctx, &range);
1800ad465caeSAndrea Arcangeli }
1801ad465caeSAndrea Arcangeli ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1802ad465caeSAndrea Arcangeli out:
1803ad465caeSAndrea Arcangeli return ret;
1804ad465caeSAndrea Arcangeli }
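
/*
 * Hypothetical userland sketch (not part of this file): resolving a
 * missing fault reported in msg by atomically copying a prepared page
 * into place; page_size and src_page are assumptions set up by the
 * caller:
 *
 *	struct uffdio_copy copy = {
 *		.dst = msg.arg.pagefault.address & ~(__u64)(page_size - 1),
 *		.src = (__u64)(unsigned long)src_page,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno != EEXIST)
 *		err(1, "UFFDIO_COPY");
 *
 * copy.copy reports the bytes actually copied, matching the
 * put_user() above.
 */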
1805ad465caeSAndrea Arcangeli
1806ad465caeSAndrea Arcangeli static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1807ad465caeSAndrea Arcangeli unsigned long arg)
1808ad465caeSAndrea Arcangeli {
1809ad465caeSAndrea Arcangeli __s64 ret;
1810ad465caeSAndrea Arcangeli struct uffdio_zeropage uffdio_zeropage;
1811ad465caeSAndrea Arcangeli struct uffdio_zeropage __user *user_uffdio_zeropage;
1812ad465caeSAndrea Arcangeli struct userfaultfd_wake_range range;
1813ad465caeSAndrea Arcangeli
1814ad465caeSAndrea Arcangeli user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1815ad465caeSAndrea Arcangeli
1816df2cc96eSMike Rapoport ret = -EAGAIN;
1817a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing))
1818df2cc96eSMike Rapoport goto out;
1819df2cc96eSMike Rapoport
1820ad465caeSAndrea Arcangeli ret = -EFAULT;
1821ad465caeSAndrea Arcangeli if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1822ad465caeSAndrea Arcangeli /* don't copy "zeropage" last field */
1823ad465caeSAndrea Arcangeli sizeof(uffdio_zeropage)-sizeof(__s64)))
1824ad465caeSAndrea Arcangeli goto out;
1825ad465caeSAndrea Arcangeli
1826e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1827ad465caeSAndrea Arcangeli uffdio_zeropage.range.len);
1828ad465caeSAndrea Arcangeli if (ret)
1829ad465caeSAndrea Arcangeli goto out;
1830ad465caeSAndrea Arcangeli ret = -EINVAL;
1831ad465caeSAndrea Arcangeli if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1832ad465caeSAndrea Arcangeli goto out;
1833ad465caeSAndrea Arcangeli
1834d2005e3fSOleg Nesterov if (mmget_not_zero(ctx->mm)) {
1835a734991cSAxel Rasmussen ret = mfill_atomic_zeropage(ctx->mm, uffdio_zeropage.range.start,
1836df2cc96eSMike Rapoport uffdio_zeropage.range.len,
1837df2cc96eSMike Rapoport &ctx->mmap_changing);
1838d2005e3fSOleg Nesterov mmput(ctx->mm);
18399d95aa4bSMike Rapoport } else {
1840e86b298bSMike Rapoport return -ESRCH;
1841d2005e3fSOleg Nesterov }
1842ad465caeSAndrea Arcangeli if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1843ad465caeSAndrea Arcangeli return -EFAULT;
1844ad465caeSAndrea Arcangeli if (ret < 0)
1845ad465caeSAndrea Arcangeli goto out;
1846ad465caeSAndrea Arcangeli /* len == 0 would wake all */
1847ad465caeSAndrea Arcangeli BUG_ON(!ret);
1848ad465caeSAndrea Arcangeli range.len = ret;
1849ad465caeSAndrea Arcangeli if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1850ad465caeSAndrea Arcangeli range.start = uffdio_zeropage.range.start;
1851ad465caeSAndrea Arcangeli wake_userfault(ctx, &range);
1852ad465caeSAndrea Arcangeli }
1853ad465caeSAndrea Arcangeli ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1854ad465caeSAndrea Arcangeli out:
1855ad465caeSAndrea Arcangeli return ret;
1856ad465caeSAndrea Arcangeli }
1857ad465caeSAndrea Arcangeli
185863b2d417SAndrea Arcangeli static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
185963b2d417SAndrea Arcangeli unsigned long arg)
186063b2d417SAndrea Arcangeli {
186163b2d417SAndrea Arcangeli int ret;
186263b2d417SAndrea Arcangeli struct uffdio_writeprotect uffdio_wp;
186363b2d417SAndrea Arcangeli struct uffdio_writeprotect __user *user_uffdio_wp;
186463b2d417SAndrea Arcangeli struct userfaultfd_wake_range range;
186523080e27SPeter Xu bool mode_wp, mode_dontwake;
186663b2d417SAndrea Arcangeli
1867a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing))
186863b2d417SAndrea Arcangeli return -EAGAIN;
186963b2d417SAndrea Arcangeli
187063b2d417SAndrea Arcangeli user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
187163b2d417SAndrea Arcangeli
187263b2d417SAndrea Arcangeli if (copy_from_user(&uffdio_wp, user_uffdio_wp,
187363b2d417SAndrea Arcangeli sizeof(struct uffdio_writeprotect)))
187463b2d417SAndrea Arcangeli return -EFAULT;
187563b2d417SAndrea Arcangeli
1876e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_wp.range.start,
187763b2d417SAndrea Arcangeli uffdio_wp.range.len);
187863b2d417SAndrea Arcangeli if (ret)
187963b2d417SAndrea Arcangeli return ret;
188063b2d417SAndrea Arcangeli
188163b2d417SAndrea Arcangeli if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
188263b2d417SAndrea Arcangeli UFFDIO_WRITEPROTECT_MODE_WP))
188363b2d417SAndrea Arcangeli return -EINVAL;
188423080e27SPeter Xu
188523080e27SPeter Xu mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
188623080e27SPeter Xu mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
188723080e27SPeter Xu
188823080e27SPeter Xu if (mode_wp && mode_dontwake)
188963b2d417SAndrea Arcangeli return -EINVAL;
189063b2d417SAndrea Arcangeli
1891cb185d5fSNadav Amit if (mmget_not_zero(ctx->mm)) {
189263b2d417SAndrea Arcangeli ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
189323080e27SPeter Xu uffdio_wp.range.len, mode_wp,
189463b2d417SAndrea Arcangeli &ctx->mmap_changing);
1895cb185d5fSNadav Amit mmput(ctx->mm);
1896cb185d5fSNadav Amit } else {
1897cb185d5fSNadav Amit return -ESRCH;
1898cb185d5fSNadav Amit }
1899cb185d5fSNadav Amit
190063b2d417SAndrea Arcangeli if (ret)
190163b2d417SAndrea Arcangeli return ret;
190263b2d417SAndrea Arcangeli
190323080e27SPeter Xu if (!mode_wp && !mode_dontwake) {
190463b2d417SAndrea Arcangeli range.start = uffdio_wp.range.start;
190563b2d417SAndrea Arcangeli range.len = uffdio_wp.range.len;
190663b2d417SAndrea Arcangeli wake_userfault(ctx, &range);
190763b2d417SAndrea Arcangeli }
190863b2d417SAndrea Arcangeli return ret;
190963b2d417SAndrea Arcangeli }
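
/*
 * Hypothetical userland sketch (not part of this file): write
 * protecting a range and later removing the protection. Per the code
 * above, the !mode_wp && !mode_dontwake case also wakes any blocked
 * faulters:
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = (__u64)(unsigned long)addr, .len = len },
 *		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 *	...
 *	wp.mode = 0;
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 */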
191063b2d417SAndrea Arcangeli
1911f6191471SAxel Rasmussen static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1912f6191471SAxel Rasmussen {
1913f6191471SAxel Rasmussen __s64 ret;
1914f6191471SAxel Rasmussen struct uffdio_continue uffdio_continue;
1915f6191471SAxel Rasmussen struct uffdio_continue __user *user_uffdio_continue;
1916f6191471SAxel Rasmussen struct userfaultfd_wake_range range;
191702891844SAxel Rasmussen uffd_flags_t flags = 0;
1918f6191471SAxel Rasmussen
1919f6191471SAxel Rasmussen user_uffdio_continue = (struct uffdio_continue __user *)arg;
1920f6191471SAxel Rasmussen
1921f6191471SAxel Rasmussen ret = -EAGAIN;
1922a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing))
1923f6191471SAxel Rasmussen goto out;
1924f6191471SAxel Rasmussen
1925f6191471SAxel Rasmussen ret = -EFAULT;
1926f6191471SAxel Rasmussen if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1927f6191471SAxel Rasmussen /* don't copy the output fields */
1928f6191471SAxel Rasmussen sizeof(uffdio_continue) - (sizeof(__s64))))
1929f6191471SAxel Rasmussen goto out;
1930f6191471SAxel Rasmussen
1931e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_continue.range.start,
1932f6191471SAxel Rasmussen uffdio_continue.range.len);
1933f6191471SAxel Rasmussen if (ret)
1934f6191471SAxel Rasmussen goto out;
1935f6191471SAxel Rasmussen
1936f6191471SAxel Rasmussen ret = -EINVAL;
193702891844SAxel Rasmussen if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE |
193802891844SAxel Rasmussen UFFDIO_CONTINUE_MODE_WP))
1939f6191471SAxel Rasmussen goto out;
194002891844SAxel Rasmussen if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
194102891844SAxel Rasmussen flags |= MFILL_ATOMIC_WP;
1942f6191471SAxel Rasmussen
1943f6191471SAxel Rasmussen if (mmget_not_zero(ctx->mm)) {
1944a734991cSAxel Rasmussen ret = mfill_atomic_continue(ctx->mm, uffdio_continue.range.start,
1945f6191471SAxel Rasmussen uffdio_continue.range.len,
194602891844SAxel Rasmussen &ctx->mmap_changing, flags);
1947f6191471SAxel Rasmussen mmput(ctx->mm);
1948f6191471SAxel Rasmussen } else {
1949f6191471SAxel Rasmussen return -ESRCH;
1950f6191471SAxel Rasmussen }
1951f6191471SAxel Rasmussen
1952f6191471SAxel Rasmussen if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1953f6191471SAxel Rasmussen return -EFAULT;
1954f6191471SAxel Rasmussen if (ret < 0)
1955f6191471SAxel Rasmussen goto out;
1956f6191471SAxel Rasmussen
1957f6191471SAxel Rasmussen /* len == 0 would wake all */
1958f6191471SAxel Rasmussen BUG_ON(!ret);
1959f6191471SAxel Rasmussen range.len = ret;
1960f6191471SAxel Rasmussen if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1961f6191471SAxel Rasmussen range.start = uffdio_continue.range.start;
1962f6191471SAxel Rasmussen wake_userfault(ctx, &range);
1963f6191471SAxel Rasmussen }
1964f6191471SAxel Rasmussen ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1965f6191471SAxel Rasmussen
1966f6191471SAxel Rasmussen out:
1967f6191471SAxel Rasmussen return ret;
1968f6191471SAxel Rasmussen }
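
/*
 * Hypothetical userland sketch (not part of this file): resolving a
 * minor fault by asking the kernel to map the page that already
 * exists in the page cache at the faulting offset; fault_page_addr
 * and page_size are assumptions set up by the caller:
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_page_addr, .len = page_size },
 *		.mode = 0,
 *	};
 *
 *	ioctl(uffd, UFFDIO_CONTINUE, &cont);
 *
 * cont.mapped then holds the number of bytes mapped, mirroring the
 * put_user() above.
 */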
1969f6191471SAxel Rasmussen
1970fc71884aSAxel Rasmussen static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
1971fc71884aSAxel Rasmussen {
1972fc71884aSAxel Rasmussen __s64 ret;
1973fc71884aSAxel Rasmussen struct uffdio_poison uffdio_poison;
1974fc71884aSAxel Rasmussen struct uffdio_poison __user *user_uffdio_poison;
1975fc71884aSAxel Rasmussen struct userfaultfd_wake_range range;
1976fc71884aSAxel Rasmussen
1977fc71884aSAxel Rasmussen user_uffdio_poison = (struct uffdio_poison __user *)arg;
1978fc71884aSAxel Rasmussen
1979fc71884aSAxel Rasmussen ret = -EAGAIN;
1980fc71884aSAxel Rasmussen if (atomic_read(&ctx->mmap_changing))
1981fc71884aSAxel Rasmussen goto out;
1982fc71884aSAxel Rasmussen
1983fc71884aSAxel Rasmussen ret = -EFAULT;
1984fc71884aSAxel Rasmussen if (copy_from_user(&uffdio_poison, user_uffdio_poison,
1985fc71884aSAxel Rasmussen /* don't copy the output fields */
1986fc71884aSAxel Rasmussen sizeof(uffdio_poison) - (sizeof(__s64))))
1987fc71884aSAxel Rasmussen goto out;
1988fc71884aSAxel Rasmussen
1989fc71884aSAxel Rasmussen ret = validate_range(ctx->mm, uffdio_poison.range.start,
1990fc71884aSAxel Rasmussen uffdio_poison.range.len);
1991fc71884aSAxel Rasmussen if (ret)
1992fc71884aSAxel Rasmussen goto out;
1993fc71884aSAxel Rasmussen
1994fc71884aSAxel Rasmussen ret = -EINVAL;
1995fc71884aSAxel Rasmussen if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
1996fc71884aSAxel Rasmussen goto out;
1997fc71884aSAxel Rasmussen
1998fc71884aSAxel Rasmussen if (mmget_not_zero(ctx->mm)) {
1999fc71884aSAxel Rasmussen ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
2000fc71884aSAxel Rasmussen uffdio_poison.range.len,
2001fc71884aSAxel Rasmussen &ctx->mmap_changing, 0);
2002fc71884aSAxel Rasmussen mmput(ctx->mm);
2003fc71884aSAxel Rasmussen } else {
2004fc71884aSAxel Rasmussen return -ESRCH;
2005fc71884aSAxel Rasmussen }
2006fc71884aSAxel Rasmussen
2007fc71884aSAxel Rasmussen if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
2008fc71884aSAxel Rasmussen return -EFAULT;
2009fc71884aSAxel Rasmussen if (ret < 0)
2010fc71884aSAxel Rasmussen goto out;
2011fc71884aSAxel Rasmussen
2012fc71884aSAxel Rasmussen /* len == 0 would wake all */
2013fc71884aSAxel Rasmussen BUG_ON(!ret);
2014fc71884aSAxel Rasmussen range.len = ret;
2015fc71884aSAxel Rasmussen if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
2016fc71884aSAxel Rasmussen range.start = uffdio_poison.range.start;
2017fc71884aSAxel Rasmussen wake_userfault(ctx, &range);
2018fc71884aSAxel Rasmussen }
2019fc71884aSAxel Rasmussen ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
2020fc71884aSAxel Rasmussen
2021fc71884aSAxel Rasmussen out:
2022fc71884aSAxel Rasmussen return ret;
2023fc71884aSAxel Rasmussen }
2024fc71884aSAxel Rasmussen
20259cd75c3cSPavel Emelyanov static inline unsigned int uffd_ctx_features(__u64 user_features)
20269cd75c3cSPavel Emelyanov {
20279cd75c3cSPavel Emelyanov /*
202822e5fe2aSNadav Amit * For the current set of features the bits just coincide. Set
202922e5fe2aSNadav Amit * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
20309cd75c3cSPavel Emelyanov */
203122e5fe2aSNadav Amit return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
20329cd75c3cSPavel Emelyanov }
20339cd75c3cSPavel Emelyanov
203486039bd3SAndrea Arcangeli /*
203586039bd3SAndrea Arcangeli * userland asks for a certain API version and we return which bits
203686039bd3SAndrea Arcangeli * and ioctl commands are implemented in this kernel for that
203786039bd3SAndrea Arcangeli * API version, or -EINVAL if unknown.
203886039bd3SAndrea Arcangeli */
203986039bd3SAndrea Arcangeli static int userfaultfd_api(struct userfaultfd_ctx *ctx,
204086039bd3SAndrea Arcangeli unsigned long arg)
204186039bd3SAndrea Arcangeli {
204286039bd3SAndrea Arcangeli struct uffdio_api uffdio_api;
204386039bd3SAndrea Arcangeli void __user *buf = (void __user *)arg;
204422e5fe2aSNadav Amit unsigned int ctx_features;
204586039bd3SAndrea Arcangeli int ret;
204665603144SAndrea Arcangeli __u64 features;
204786039bd3SAndrea Arcangeli
204886039bd3SAndrea Arcangeli ret = -EFAULT;
2049a9b85f94SAndrea Arcangeli if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
205086039bd3SAndrea Arcangeli goto out;
20512ff559f3SPeter Xu features = uffdio_api.features;
20522ff559f3SPeter Xu ret = -EINVAL;
2053*cd94cac4SAudra Mitchell if (uffdio_api.api != UFFD_API)
20542ff559f3SPeter Xu goto err_out;
20553c1c24d9SMike Rapoport ret = -EPERM;
20563c1c24d9SMike Rapoport if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
20573c1c24d9SMike Rapoport goto err_out;
205865603144SAndrea Arcangeli /* report all available features and ioctls to userland */
205965603144SAndrea Arcangeli uffdio_api.features = UFFD_API_FEATURES;
20607677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
2061964ab004SAxel Rasmussen uffdio_api.features &=
2062964ab004SAxel Rasmussen ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
20637677f7fdSAxel Rasmussen #endif
206400b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
206500b151f2SPeter Xu uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
206686039bd3SAndrea Arcangeli #endif
2067b1f9e876SPeter Xu #ifndef CONFIG_PTE_MARKER_UFFD_WP
2068b1f9e876SPeter Xu uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
20692bad466cSPeter Xu uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
2070b1f9e876SPeter Xu #endif
2071*cd94cac4SAudra Mitchell
2072*cd94cac4SAudra Mitchell ret = -EINVAL;
2073*cd94cac4SAudra Mitchell if (features & ~uffdio_api.features)
2074*cd94cac4SAudra Mitchell goto err_out;
2075*cd94cac4SAudra Mitchell
207686039bd3SAndrea Arcangeli uffdio_api.ioctls = UFFD_API_IOCTLS;
207786039bd3SAndrea Arcangeli ret = -EFAULT;
207886039bd3SAndrea Arcangeli if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
207986039bd3SAndrea Arcangeli goto out;
208022e5fe2aSNadav Amit
208186039bd3SAndrea Arcangeli /* only enable the requested features for this uffd context */
208222e5fe2aSNadav Amit ctx_features = uffd_ctx_features(features);
208322e5fe2aSNadav Amit ret = -EINVAL;
208422e5fe2aSNadav Amit if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
208522e5fe2aSNadav Amit goto err_out;
208622e5fe2aSNadav Amit
208786039bd3SAndrea Arcangeli ret = 0;
208886039bd3SAndrea Arcangeli out:
208986039bd3SAndrea Arcangeli return ret;
209086039bd3SAndrea Arcangeli err_out:
209186039bd3SAndrea Arcangeli memset(&uffdio_api, 0, sizeof(uffdio_api));
209286039bd3SAndrea Arcangeli if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
209386039bd3SAndrea Arcangeli ret = -EFAULT;
209486039bd3SAndrea Arcangeli goto out;
209586039bd3SAndrea Arcangeli }
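
/*
 * Hypothetical userland sketch (not part of this file): the handshake
 * implemented above. The requested features must be a subset of what
 * the kernel advertises, and the handshake can succeed only once per
 * context (see the cmpxchg):
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *
 * On return api.features reports every feature available on this
 * kernel (not just the requested ones) and api.ioctls the
 * UFFD_API_IOCTLS mask.
 */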
209686039bd3SAndrea Arcangeli
209786039bd3SAndrea Arcangeli static long userfaultfd_ioctl(struct file *file, unsigned cmd,
2098e6485a47SAndrea Arcangeli unsigned long arg)
2099e6485a47SAndrea Arcangeli {
2100e6485a47SAndrea Arcangeli int ret = -EINVAL;
210186039bd3SAndrea Arcangeli struct userfaultfd_ctx *ctx = file->private_data;
210286039bd3SAndrea Arcangeli
210322e5fe2aSNadav Amit if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
210486039bd3SAndrea Arcangeli return -EINVAL;
210586039bd3SAndrea Arcangeli
210686039bd3SAndrea Arcangeli switch(cmd) {
210786039bd3SAndrea Arcangeli case UFFDIO_API:
210886039bd3SAndrea Arcangeli ret = userfaultfd_api(ctx, arg);
210986039bd3SAndrea Arcangeli break;
211086039bd3SAndrea Arcangeli case UFFDIO_REGISTER:
211186039bd3SAndrea Arcangeli ret = userfaultfd_register(ctx, arg);
211286039bd3SAndrea Arcangeli break;
211386039bd3SAndrea Arcangeli case UFFDIO_UNREGISTER:
211486039bd3SAndrea Arcangeli ret = userfaultfd_unregister(ctx, arg);
211586039bd3SAndrea Arcangeli break;
211686039bd3SAndrea Arcangeli case UFFDIO_WAKE:
2117ad465caeSAndrea Arcangeli ret = userfaultfd_wake(ctx, arg);
2118ad465caeSAndrea Arcangeli break;
2119ad465caeSAndrea Arcangeli case UFFDIO_COPY:
2120ad465caeSAndrea Arcangeli ret = userfaultfd_copy(ctx, arg);
2121ad465caeSAndrea Arcangeli break;
2122ad465caeSAndrea Arcangeli case UFFDIO_ZEROPAGE:
212386039bd3SAndrea Arcangeli ret = userfaultfd_zeropage(ctx, arg);
212486039bd3SAndrea Arcangeli break;
212563b2d417SAndrea Arcangeli case UFFDIO_WRITEPROTECT:
212663b2d417SAndrea Arcangeli ret = userfaultfd_writeprotect(ctx, arg);
212763b2d417SAndrea Arcangeli break;
2128f6191471SAxel Rasmussen case UFFDIO_CONTINUE:
2129f6191471SAxel Rasmussen ret = userfaultfd_continue(ctx, arg);
2130f6191471SAxel Rasmussen break;
2131fc71884aSAxel Rasmussen case UFFDIO_POISON:
2132fc71884aSAxel Rasmussen ret = userfaultfd_poison(ctx, arg);
2133fc71884aSAxel Rasmussen break;
213486039bd3SAndrea Arcangeli }
213586039bd3SAndrea Arcangeli return ret;
213686039bd3SAndrea Arcangeli }
213786039bd3SAndrea Arcangeli
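/*
 * A typical caller follows the UFFDIO_API handshake with UFFDIO_REGISTER
 * before using any of the range ioctls dispatched above. A minimal
 * userspace sketch (illustrative only; 'addr' and 'len' are assumed
 * page-aligned values supplied by the caller):
 *
 *	struct uffdio_register reg;
 *
 *	reg.range.start = (unsigned long)addr;
 *	reg.range.len = len;
 *	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		return -1;	// e.g. the range cannot be registered
 *	// on success, reg.ioctls reports which range ioctls apply
 */
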
#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct userfaultfd_ctx *ctx = f->private_data;
	wait_queue_entry_t *wq;
	unsigned long pending = 0, total = 0;

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
		pending++;
		total++;
	}
	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
		total++;
	}
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/*
	 * If more protocols are added in the future, they will all be
	 * shown separated by a space, like this:
	 *	protocols: aa:... bb:...
	 */
	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
		   pending, total, UFFD_API, ctx->features,
		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif

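/*
 * Sample /proc/<pid>/fdinfo/<fd> output produced by the function above.
 * The feature and ioctl masks depend on the kernel configuration, so the
 * last line uses placeholders rather than real values:
 *
 *	pending:	2
 *	total:	5
 *	API:	aa:<features>:<ioctl mask>
 */
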
static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= userfaultfd_show_fdinfo,
#endif
	.release	= userfaultfd_release,
	.poll		= userfaultfd_poll,
	.read		= userfaultfd_read,
	.unlocked_ioctl = userfaultfd_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

	init_waitqueue_head(&ctx->fault_pending_wqh);
	init_waitqueue_head(&ctx->fault_wqh);
	init_waitqueue_head(&ctx->event_wqh);
	init_waitqueue_head(&ctx->fd_wqh);
	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
}

static int new_userfaultfd(int flags)
{
	struct userfaultfd_ctx *ctx;
	int fd;

	BUG_ON(!current->mm);

	/* Check the UFFD_* constants for consistency.  */
	BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
		return -EINVAL;

	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	refcount_set(&ctx->refcount, 1);
	ctx->flags = flags;
	ctx->features = 0;
	ctx->released = false;
	atomic_set(&ctx->mmap_changing, 0);
	ctx->mm = current->mm;
	/* prevent the mm struct from being freed */
	mmgrab(ctx->mm);

	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
			O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
	if (fd < 0) {
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
	return fd;
}

static inline bool userfaultfd_syscall_allowed(int flags)
{
	/* Userspace-only page faults are always allowed */
	if (flags & UFFD_USER_MODE_ONLY)
		return true;

	/*
	 * The user is requesting a userfaultfd which can handle kernel faults.
	 * Privileged users are always allowed to do this.
	 */
	if (capable(CAP_SYS_PTRACE))
		return true;

	/* Otherwise, access to kernel fault handling is sysctl controlled. */
	return sysctl_unprivileged_userfaultfd;
}

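/*
 * For the unprivileged, kernel-fault-handling case the decision is thus
 * delegated to the vm.unprivileged_userfaultfd sysctl (default 0), which
 * an administrator may flip, e.g.:
 *
 *	# echo 1 > /proc/sys/vm/unprivileged_userfaultfd
 */
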
SYSCALL_DEFINE1(userfaultfd, int, flags)
{
	if (!userfaultfd_syscall_allowed(flags))
		return -EPERM;

	return new_userfaultfd(flags);
}

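/*
 * Glibc provides no wrapper for this syscall, so userspace invokes it via
 * syscall(2). Illustrative sketch (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/userfaultfd.h>
 *
 *	int uffd = syscall(__NR_userfaultfd,
 *			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *	// without UFFD_USER_MODE_ONLY this fails with EPERM when the
 *	// caller lacks CAP_SYS_PTRACE and the sysctl above is 0
 */
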
static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
{
	if (cmd != USERFAULTFD_IOC_NEW)
		return -EINVAL;

	return new_userfaultfd(flags);
}

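/*
 * This ioctl backs the /dev/userfaultfd interface, which gates access by
 * file permissions on the device node rather than by capabilities or the
 * sysctl. Illustrative userspace sketch (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
 *	int uffd = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);
 */
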
static const struct file_operations userfaultfd_dev_fops = {
	.unlocked_ioctl = userfaultfd_dev_ioctl,
	.compat_ioctl = userfaultfd_dev_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice userfaultfd_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "userfaultfd",
	.fops = &userfaultfd_dev_fops
};

static int __init userfaultfd_init(void)
{
	int ret;

	ret = misc_register(&userfaultfd_misc);
	if (ret)
		return ret;

	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
						sizeof(struct userfaultfd_ctx),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						init_once_userfaultfd_ctx);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("vm", vm_userfaultfd_table);
#endif
	return 0;
}
__initcall(userfaultfd_init);