// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>

static int sysctl_unprivileged_userfaultfd __read_mostly;

#ifdef CONFIG_SYSCTL
static struct ctl_table vm_userfaultfd_table[] = {
	{
		.procname	= "unprivileged_userfaultfd",
		.data		= &sysctl_unprivileged_userfaultfd,
		.maxlen		= sizeof(sysctl_unprivileged_userfaultfd),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};
#endif

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};
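/*
 * Illustrative sketch, not part of this file: the locking order
 * documented above, written out as nested acquisitions with IRQs
 * disabled at the outermost level. userfaultfd_ctx_read() below
 * follows exactly this pattern:
 *
 *	spin_lock_irq(&ctx->fd_wqh.lock);
 *	spin_lock(&ctx->fault_pending_wqh.lock);
 *	... refile or read pending userfaults ...
 *	spin_unlock(&ctx->fault_pending_wqh.lock);
 *	spin_unlock_irq(&ctx->fd_wqh.lock);
 */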
struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED		(1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
	return ctx->features & UFFD_FEATURE_INITIALIZED;
}
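/*
 * Illustrative userspace sketch, not part of this file: the UFFDIO_API
 * handshake that sets UFFD_FEATURE_INITIALIZED above. Until it
 * succeeds, userfaultfd_poll() below reports EPOLLERR and the other
 * operations refuse to work:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/userfaultfd.h>
 *
 *	int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *
 *	if (ufd == -1 || ioctl(ufd, UFFDIO_API, &api) == -1)
 *		exit(1);
 */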
/*
 * Whether WP_UNPOPULATED is enabled on the uffd context. It is only
 * meaningful when userfaultfd_wp()==true on the vma and when it's
 * anonymous.
 */
bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return false;

	return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}
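/*
 * Illustrative userspace sketch, not part of this file: an explicit
 * UFFDIO_WAKE on a registered range, which funnels into
 * userfaultfd_wake_function() above. This is typically only needed
 * when the fault was resolved with the ..._MODE_DONTWAKE variants of
 * UFFDIO_COPY/UFFDIO_ZEROPAGE. (Within this file, len == 0 means wake
 * all; that path is used internally by the release code, not by the
 * ioctl.)
 *
 *	struct uffdio_range range = {
 *		.start = (unsigned long)addr,
 *		.len = page_size,
 *	};
 *
 *	if (ioctl(ufd, UFFDIO_WAKE, &range) == -1)
 *		exit(1);
 */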
/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the paddings or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}
static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned long real_address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;

	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
				    real_address : address;

	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}
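/*
 * Illustrative userspace sketch, not part of this file: decoding the
 * uffd_msg built above once it has been read() from the userfaultfd:
 *
 *	struct uffd_msg msg;
 *
 *	if (read(ufd, &msg, sizeof(msg)) != sizeof(msg))
 *		exit(1);
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		int is_write = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE;
 *		int is_wp    = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP;
 *		int is_minor = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR;
 *
 *		if (!is_wp && !is_minor)
 *			resolve_missing_fault(msg.arg.pagefault.address);
 *	}
 *
 * resolve_missing_fault() is a hypothetical helper standing in for
 * whatever the monitor does, typically an UFFDIO_COPY as sketched
 * further below.
 */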
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	pte_t *ptep, pte;
	bool ret = true;

	mmap_assert_locked(ctx->mm);

	ptep = hugetlb_walk(vma, address, vma_mmu_pagesize(vma));
	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us. PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pte_t ptent;
	bool ret = true;

	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
again:
	_pmd = pmdp_get_lockless(pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd) || pmd_devmap(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	pte = pte_offset_map(pmd, address);
	if (!pte) {
		ret = true;
		goto again;
	}
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us. PTE markers should be handled the same as none
	 * ptes here.
	 */
	ptent = ptep_get(pte);
	if (pte_none_mostly(ptent))
		ret = true;
	if (!pte_write(ptent) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}
/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * hugetlb_follow_page_mask() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_lock and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_lock so we can only check that
	 * the mmap_lock is held, if PF_DUMPCORE was not set.
	 */
	mmap_assert_locked(mm);

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
		goto out;

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without the risk of triggering an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
				reason, ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	/*
	 * Take the vma lock now, in order to safely call
	 * userfaultfd_huge_must_wait() later. Since acquiring the
	 * (sleepable) vma lock can modify the current task state, that
	 * must be before explicitly calling set_current_state().
	 */
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_lock_read(vma);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list_add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vma,
						       vmf->address,
						       vmf->flags, reason);
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_unlock_read(vma);
	mmap_read_unlock(mm);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let any of the two pointers to point to
	 * self. So list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}
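/*
 * Illustrative userspace sketch, not part of this file: the monitor
 * side of a MISSING fault reported by handle_userfault() above,
 * resolved with UFFDIO_COPY. A successful copy also wakes the thread
 * blocked in schedule() above:
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	struct uffdio_copy copy = {
 *		.dst = msg.arg.pagefault.address & ~(page_size - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(ufd, UFFDIO_COPY, &copy) == -1)
 *		exit(1);
 *
 * src_buf is a hypothetical page-sized buffer holding the content the
 * faulting page should have.
 */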
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;
		VMA_ITERATOR(vmi, mm, 0);

		/* the various vma->vm_userfaultfd_ctx still point to it */
		mmap_write_lock(mm);
		for_each_vma(vmi, vma) {
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				userfaultfd_set_vm_flags(vma,
							 vma->vm_flags & ~__VM_UFFD_FLAGS);
			}
		}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	atomic_dec(&ctx->mmap_changing);
	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->features = octx->features;
		ctx->released = false;
		atomic_set(&ctx->mmap_changing, 0);
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		atomic_inc(&octx->mmap_changing);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}
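/*
 * Illustrative userspace sketch, not part of this file: consuming the
 * UFFD_EVENT_FORK message queued via dup_fctx() above. By the time the
 * message reaches userland, resolve_userfault_fork() (further below)
 * has replaced the context pointer with a new file descriptor that
 * monitors the child's address space:
 *
 *	struct uffd_msg msg;
 *
 *	if (read(ufd, &msg, sizeof(msg)) == sizeof(msg) &&
 *	    msg.event == UFFD_EVENT_FORK) {
 *		int child_ufd = msg.arg.fork.ufd;
 *		... start polling child_ufd alongside ufd ...
 *	}
 */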
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	atomic_inc(&ctx->mmap_changing);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}
int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, struct list_head *unmaps)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;
	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
	    has_unmap_ctx(ctx, unmaps, start, end))
		return 0;

	unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
	if (!unmap_ctx)
		return -ENOMEM;

	userfaultfd_ctx_get(ctx);
	atomic_inc(&ctx->mmap_changing);
	unmap_ctx->ctx = ctx;
	unmap_ctx->start = start;
	unmap_ctx->end = end;
	list_add_tail(&unmap_ctx->list, unmaps);

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}
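/*
 * Illustrative userspace sketch, not part of this file: each of the
 * non-cooperative events above bumps ctx->mmap_changing, which makes
 * UFFDIO_COPY and friends fail with EAGAIN until the event has been
 * read. A monitor is therefore expected to retry, draining the event
 * queue in between (an assumption based on the uffd documentation, not
 * spelled out in this section):
 *
 *	while (ioctl(ufd, UFFDIO_COPY, &copy) == -1) {
 *		if (errno != EAGAIN)
 *			exit(1);
 *		... read() and handle the pending event, then retry ...
 *	}
 */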
static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;
	VMA_ITERATOR(vmi, mm, 0);

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
		if (prev) {
			vma = prev;
		} else {
			prev = vma;
		}

		userfaultfd_set_vm_flags(vma, new_flags);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}
static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	if (!userfaultfd_is_initialized(ctx))
		return EPOLLERR;

	/*
	 * poll() never guarantees that read won't block.
	 * userfaults can be woken before they're read().
	 */
	if (unlikely(!(file->f_flags & O_NONBLOCK)))
		return EPOLLERR;
	/*
	 * Lockless access to see if there are pending faults.
	 * __pollwait's last action is the add_wait_queue, but the
	 * spin_unlock would allow the waitqueue_active to pass above
	 * the actual list_add inside the add_wait_queue critical
	 * section. So use a full memory barrier to serialize the
	 * list_add write of add_wait_queue() with the
	 * waitqueue_active read below.
	 */
	ret = 0;
	smp_mb();
	if (waitqueue_active(&ctx->fault_pending_wqh))
		ret = EPOLLIN;
	else if (waitqueue_active(&ctx->event_wqh))
		ret = EPOLLIN;

	return ret;
}
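/*
 * Illustrative userspace sketch, not part of this file: the event loop
 * that pairs with userfaultfd_poll() above. The fd must have been
 * opened with O_NONBLOCK, or poll() reports EPOLLERR as implemented
 * above:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = ufd, .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) == -1)
 *			exit(1);
 *		if (pfd.revents & POLLIN)
 *			... read() and dispatch one or more uffd_msg ...
 *	}
 */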
102315b726efSAndrea Arcangeli */ 102415b726efSAndrea Arcangeli ret = 0; 102515b726efSAndrea Arcangeli smp_mb(); 102615b726efSAndrea Arcangeli if (waitqueue_active(&ctx->fault_pending_wqh)) 1027a9a08845SLinus Torvalds ret = EPOLLIN; 10289cd75c3cSPavel Emelyanov else if (waitqueue_active(&ctx->event_wqh)) 1029a9a08845SLinus Torvalds ret = EPOLLIN; 10309cd75c3cSPavel Emelyanov 103186039bd3SAndrea Arcangeli return ret; 103286039bd3SAndrea Arcangeli } 103386039bd3SAndrea Arcangeli 1034893e26e6SPavel Emelyanov static const struct file_operations userfaultfd_fops; 1035893e26e6SPavel Emelyanov 1036b537900fSDaniel Colascione static int resolve_userfault_fork(struct userfaultfd_ctx *new, 1037b537900fSDaniel Colascione struct inode *inode, 1038893e26e6SPavel Emelyanov struct uffd_msg *msg) 1039893e26e6SPavel Emelyanov { 1040893e26e6SPavel Emelyanov int fd; 1041893e26e6SPavel Emelyanov 1042b537900fSDaniel Colascione fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new, 1043abec3d01SOndrej Mosnacek O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); 1044893e26e6SPavel Emelyanov if (fd < 0) 1045893e26e6SPavel Emelyanov return fd; 1046893e26e6SPavel Emelyanov 1047893e26e6SPavel Emelyanov msg->arg.reserved.reserved1 = 0; 1048893e26e6SPavel Emelyanov msg->arg.fork.ufd = fd; 1049893e26e6SPavel Emelyanov return 0; 1050893e26e6SPavel Emelyanov } 1051893e26e6SPavel Emelyanov 105286039bd3SAndrea Arcangeli static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, 1053b537900fSDaniel Colascione struct uffd_msg *msg, struct inode *inode) 105486039bd3SAndrea Arcangeli { 105586039bd3SAndrea Arcangeli ssize_t ret; 105686039bd3SAndrea Arcangeli DECLARE_WAITQUEUE(wait, current); 105715b726efSAndrea Arcangeli struct userfaultfd_wait_queue *uwq; 1058893e26e6SPavel Emelyanov /* 1059893e26e6SPavel Emelyanov * Handling fork event requires sleeping operations, so 1060893e26e6SPavel Emelyanov * we drop the event_wqh lock, then do these ops, then 1061893e26e6SPavel Emelyanov * lock it back and wake up the waiter. While the lock is 1062893e26e6SPavel Emelyanov * dropped the ewq may go away so we keep track of it 1063893e26e6SPavel Emelyanov * carefully. 1064893e26e6SPavel Emelyanov */ 1065893e26e6SPavel Emelyanov LIST_HEAD(fork_event); 1066893e26e6SPavel Emelyanov struct userfaultfd_ctx *fork_nctx = NULL; 106786039bd3SAndrea Arcangeli 106815b726efSAndrea Arcangeli /* always take the fd_wqh lock before the fault_pending_wqh lock */ 1069ae62c16eSChristoph Hellwig spin_lock_irq(&ctx->fd_wqh.lock); 107086039bd3SAndrea Arcangeli __add_wait_queue(&ctx->fd_wqh, &wait); 107186039bd3SAndrea Arcangeli for (;;) { 107286039bd3SAndrea Arcangeli set_current_state(TASK_INTERRUPTIBLE); 107315b726efSAndrea Arcangeli spin_lock(&ctx->fault_pending_wqh.lock); 107415b726efSAndrea Arcangeli uwq = find_userfault(ctx); 107515b726efSAndrea Arcangeli if (uwq) { 107686039bd3SAndrea Arcangeli /* 10772c5b7e1bSAndrea Arcangeli * Use a seqcount to repeat the lockless check 10782c5b7e1bSAndrea Arcangeli * in wake_userfault() to avoid missing 10792c5b7e1bSAndrea Arcangeli * wakeups because during the refile both 10802c5b7e1bSAndrea Arcangeli * waitqueue could become empty if this is the 10812c5b7e1bSAndrea Arcangeli * only userfault. 
10822c5b7e1bSAndrea Arcangeli 			 */
10832c5b7e1bSAndrea Arcangeli 			write_seqcount_begin(&ctx->refile_seq);
10842c5b7e1bSAndrea Arcangeli 
10852c5b7e1bSAndrea Arcangeli 			/*
108615b726efSAndrea Arcangeli 			 * The fault_pending_wqh.lock prevents the uwq
108715b726efSAndrea Arcangeli 			 * from disappearing from under us.
108815b726efSAndrea Arcangeli 			 *
108915b726efSAndrea Arcangeli 			 * Refile this userfault from
109015b726efSAndrea Arcangeli 			 * fault_pending_wqh to fault_wqh; it's not
109115b726efSAndrea Arcangeli 			 * pending anymore after we read it.
109215b726efSAndrea Arcangeli 			 *
109315b726efSAndrea Arcangeli 			 * Use list_del() by hand (as
109415b726efSAndrea Arcangeli 			 * userfaultfd_wake_function also uses
109515b726efSAndrea Arcangeli 			 * list_del_init() by hand) to be sure nobody
109615b726efSAndrea Arcangeli 			 * changes __remove_wait_queue() to use
109715b726efSAndrea Arcangeli 			 * list_del_init(), which would in turn break
109815b726efSAndrea Arcangeli 			 * the !list_empty_careful() check in
10992055da97SIngo Molnar 			 * handle_userfault(). The uwq->wq.head list
110015b726efSAndrea Arcangeli 			 * must never be empty at any time during the
110115b726efSAndrea Arcangeli 			 * refile, or the waitqueue could disappear
110215b726efSAndrea Arcangeli 			 * from under us. The "wait_queue_head_t"
110315b726efSAndrea Arcangeli 			 * parameter of __remove_wait_queue() is unused
110415b726efSAndrea Arcangeli 			 * anyway.
110586039bd3SAndrea Arcangeli 			 */
11062055da97SIngo Molnar 			list_del(&uwq->wq.entry);
1107c430d1e8SMatthew Wilcox 			add_wait_queue(&ctx->fault_wqh, &uwq->wq);
110815b726efSAndrea Arcangeli 
11092c5b7e1bSAndrea Arcangeli 			write_seqcount_end(&ctx->refile_seq);
11102c5b7e1bSAndrea Arcangeli 
1111a9b85f94SAndrea Arcangeli 			/* careful to always initialize msg if ret == 0 */
1112a9b85f94SAndrea Arcangeli 			*msg = uwq->msg;
111315b726efSAndrea Arcangeli 			spin_unlock(&ctx->fault_pending_wqh.lock);
111486039bd3SAndrea Arcangeli 			ret = 0;
111586039bd3SAndrea Arcangeli 			break;
111686039bd3SAndrea Arcangeli 		}
111715b726efSAndrea Arcangeli 		spin_unlock(&ctx->fault_pending_wqh.lock);
11189cd75c3cSPavel Emelyanov 
11199cd75c3cSPavel Emelyanov 		spin_lock(&ctx->event_wqh.lock);
11209cd75c3cSPavel Emelyanov 		uwq = find_userfault_evt(ctx);
11219cd75c3cSPavel Emelyanov 		if (uwq) {
11229cd75c3cSPavel Emelyanov 			*msg = uwq->msg;
11239cd75c3cSPavel Emelyanov 
1124893e26e6SPavel Emelyanov 			if (uwq->msg.event == UFFD_EVENT_FORK) {
1125893e26e6SPavel Emelyanov 				fork_nctx = (struct userfaultfd_ctx *)
1126893e26e6SPavel Emelyanov 					(unsigned long)
1127893e26e6SPavel Emelyanov 					uwq->msg.arg.reserved.reserved1;
11282055da97SIngo Molnar 				list_move(&uwq->wq.entry, &fork_event);
1129384632e6SAndrea Arcangeli 				/*
1130384632e6SAndrea Arcangeli 				 * fork_nctx can be freed as soon as
1131384632e6SAndrea Arcangeli 				 * we drop the lock, unless we take a
1132384632e6SAndrea Arcangeli 				 * reference on it.
1133384632e6SAndrea Arcangeli */ 1134384632e6SAndrea Arcangeli userfaultfd_ctx_get(fork_nctx); 1135893e26e6SPavel Emelyanov spin_unlock(&ctx->event_wqh.lock); 1136893e26e6SPavel Emelyanov ret = 0; 1137893e26e6SPavel Emelyanov break; 1138893e26e6SPavel Emelyanov } 1139893e26e6SPavel Emelyanov 11409cd75c3cSPavel Emelyanov userfaultfd_event_complete(ctx, uwq); 11419cd75c3cSPavel Emelyanov spin_unlock(&ctx->event_wqh.lock); 11429cd75c3cSPavel Emelyanov ret = 0; 11439cd75c3cSPavel Emelyanov break; 11449cd75c3cSPavel Emelyanov } 11459cd75c3cSPavel Emelyanov spin_unlock(&ctx->event_wqh.lock); 11469cd75c3cSPavel Emelyanov 114786039bd3SAndrea Arcangeli if (signal_pending(current)) { 114886039bd3SAndrea Arcangeli ret = -ERESTARTSYS; 114986039bd3SAndrea Arcangeli break; 115086039bd3SAndrea Arcangeli } 115186039bd3SAndrea Arcangeli if (no_wait) { 115286039bd3SAndrea Arcangeli ret = -EAGAIN; 115386039bd3SAndrea Arcangeli break; 115486039bd3SAndrea Arcangeli } 1155ae62c16eSChristoph Hellwig spin_unlock_irq(&ctx->fd_wqh.lock); 115686039bd3SAndrea Arcangeli schedule(); 1157ae62c16eSChristoph Hellwig spin_lock_irq(&ctx->fd_wqh.lock); 115886039bd3SAndrea Arcangeli } 115986039bd3SAndrea Arcangeli __remove_wait_queue(&ctx->fd_wqh, &wait); 116086039bd3SAndrea Arcangeli __set_current_state(TASK_RUNNING); 1161ae62c16eSChristoph Hellwig spin_unlock_irq(&ctx->fd_wqh.lock); 116286039bd3SAndrea Arcangeli 1163893e26e6SPavel Emelyanov if (!ret && msg->event == UFFD_EVENT_FORK) { 1164b537900fSDaniel Colascione ret = resolve_userfault_fork(fork_nctx, inode, msg); 1165cbcfa130SEric Biggers spin_lock_irq(&ctx->event_wqh.lock); 1166893e26e6SPavel Emelyanov if (!list_empty(&fork_event)) { 1167384632e6SAndrea Arcangeli /* 1168384632e6SAndrea Arcangeli * The fork thread didn't abort, so we can 1169384632e6SAndrea Arcangeli * drop the temporary refcount. 1170384632e6SAndrea Arcangeli */ 1171384632e6SAndrea Arcangeli userfaultfd_ctx_put(fork_nctx); 1172384632e6SAndrea Arcangeli 1173893e26e6SPavel Emelyanov uwq = list_first_entry(&fork_event, 1174893e26e6SPavel Emelyanov typeof(*uwq), 11752055da97SIngo Molnar wq.entry); 1176384632e6SAndrea Arcangeli /* 1177384632e6SAndrea Arcangeli * If fork_event list wasn't empty and in turn 1178384632e6SAndrea Arcangeli * the event wasn't already released by fork 1179384632e6SAndrea Arcangeli * (the event is allocated on fork kernel 1180384632e6SAndrea Arcangeli * stack), put the event back to its place in 1181384632e6SAndrea Arcangeli * the event_wq. fork_event head will be freed 1182384632e6SAndrea Arcangeli * as soon as we return so the event cannot 1183384632e6SAndrea Arcangeli * stay queued there no matter the current 1184384632e6SAndrea Arcangeli * "ret" value. 1185384632e6SAndrea Arcangeli */ 11862055da97SIngo Molnar list_del(&uwq->wq.entry); 1187893e26e6SPavel Emelyanov __add_wait_queue(&ctx->event_wqh, &uwq->wq); 1188384632e6SAndrea Arcangeli 1189384632e6SAndrea Arcangeli /* 1190384632e6SAndrea Arcangeli * Leave the event in the waitqueue and report 1191384632e6SAndrea Arcangeli * error to userland if we failed to resolve 1192384632e6SAndrea Arcangeli * the userfault fork. 
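		 *
		 * Userspace view (illustrative sketch): a monitor that
		 * enabled UFFD_FEATURE_EVENT_FORK receives the child's
		 * context as a fresh file descriptor carried inside the
		 * message it reads:
		 *
		 *	struct uffd_msg msg;
		 *
		 *	if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
		 *	    msg.event == UFFD_EVENT_FORK) {
		 *		int child_uffd = (int)msg.arg.fork.ufd;
		 *		serve_child(child_uffd);
		 *	}
		 *
		 * where serve_child() is a hypothetical handler owned by
		 * the monitor; the fd created by resolve_userfault_fork()
		 * above is what shows up in msg.arg.fork.ufd.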
1193384632e6SAndrea Arcangeli 			 */
1194384632e6SAndrea Arcangeli 			if (likely(!ret))
1195893e26e6SPavel Emelyanov 				userfaultfd_event_complete(ctx, uwq);
1196384632e6SAndrea Arcangeli 		} else {
1197384632e6SAndrea Arcangeli 			/*
1198384632e6SAndrea Arcangeli 			 * Here the fork thread aborted and the
1199384632e6SAndrea Arcangeli 			 * refcount from the fork thread on fork_nctx
1200384632e6SAndrea Arcangeli 			 * has already been released. We still hold
1201384632e6SAndrea Arcangeli 			 * the reference we took before releasing the
1202384632e6SAndrea Arcangeli 			 * lock above. If resolve_userfault_fork
1203384632e6SAndrea Arcangeli 			 * failed we have to drop it because the
1204384632e6SAndrea Arcangeli 			 * fork_nctx has to be freed in that case. If
1205384632e6SAndrea Arcangeli 			 * it succeeded we'll hold it because the new
1206384632e6SAndrea Arcangeli 			 * uffd references it.
1207384632e6SAndrea Arcangeli 			 */
1208384632e6SAndrea Arcangeli 			if (ret)
1209384632e6SAndrea Arcangeli 				userfaultfd_ctx_put(fork_nctx);
1210893e26e6SPavel Emelyanov 		}
1211cbcfa130SEric Biggers 		spin_unlock_irq(&ctx->event_wqh.lock);
1212893e26e6SPavel Emelyanov 	}
1213893e26e6SPavel Emelyanov 
121486039bd3SAndrea Arcangeli 	return ret;
121586039bd3SAndrea Arcangeli }
121686039bd3SAndrea Arcangeli 
121786039bd3SAndrea Arcangeli static ssize_t userfaultfd_read(struct file *file, char __user *buf,
121886039bd3SAndrea Arcangeli 				size_t count, loff_t *ppos)
121986039bd3SAndrea Arcangeli {
122086039bd3SAndrea Arcangeli 	struct userfaultfd_ctx *ctx = file->private_data;
122186039bd3SAndrea Arcangeli 	ssize_t _ret, ret = 0;
1222a9b85f94SAndrea Arcangeli 	struct uffd_msg msg;
122386039bd3SAndrea Arcangeli 	int no_wait = file->f_flags & O_NONBLOCK;
1224b537900fSDaniel Colascione 	struct inode *inode = file_inode(file);
122586039bd3SAndrea Arcangeli 
122622e5fe2aSNadav Amit 	if (!userfaultfd_is_initialized(ctx))
122786039bd3SAndrea Arcangeli 		return -EINVAL;
122886039bd3SAndrea Arcangeli 
122986039bd3SAndrea Arcangeli 	for (;;) {
1230a9b85f94SAndrea Arcangeli 		if (count < sizeof(msg))
123186039bd3SAndrea Arcangeli 			return ret ? ret : -EINVAL;
1232b537900fSDaniel Colascione 		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
123386039bd3SAndrea Arcangeli 		if (_ret < 0)
123486039bd3SAndrea Arcangeli 			return ret ? ret : _ret;
1235a9b85f94SAndrea Arcangeli 		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
123686039bd3SAndrea Arcangeli 			return ret ? ret : -EFAULT;
1237a9b85f94SAndrea Arcangeli 		ret += sizeof(msg);
1238a9b85f94SAndrea Arcangeli 		buf += sizeof(msg);
1239a9b85f94SAndrea Arcangeli 		count -= sizeof(msg);
124086039bd3SAndrea Arcangeli 		/*
124186039bd3SAndrea Arcangeli 		 * Allow reading more than one fault at a time but only
124286039bd3SAndrea Arcangeli 		 * block if waiting for the very first one.
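		 *
		 * A minimal userspace sketch of that batching
		 * (illustrative; handle_msg() is a hypothetical
		 * dispatcher):
		 *
		 *	struct uffd_msg msgs[16];
		 *	ssize_t n = read(uffd, msgs, sizeof(msgs));
		 *
		 *	for (ssize_t i = 0; n > 0 && i < n / (ssize_t)sizeof(*msgs); i++)
		 *		handle_msg(&msgs[i]);
		 *
		 * Only the wait for the first message ever blocks; the
		 * rest are drained opportunistically.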
124386039bd3SAndrea Arcangeli */ 124486039bd3SAndrea Arcangeli no_wait = O_NONBLOCK; 124586039bd3SAndrea Arcangeli } 124686039bd3SAndrea Arcangeli } 124786039bd3SAndrea Arcangeli 124886039bd3SAndrea Arcangeli static void __wake_userfault(struct userfaultfd_ctx *ctx, 124986039bd3SAndrea Arcangeli struct userfaultfd_wake_range *range) 125086039bd3SAndrea Arcangeli { 1251cbcfa130SEric Biggers spin_lock_irq(&ctx->fault_pending_wqh.lock); 125286039bd3SAndrea Arcangeli /* wake all in the range and autoremove */ 125315b726efSAndrea Arcangeli if (waitqueue_active(&ctx->fault_pending_wqh)) 1254ac5be6b4SAndrea Arcangeli __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 125515b726efSAndrea Arcangeli range); 125615b726efSAndrea Arcangeli if (waitqueue_active(&ctx->fault_wqh)) 1257c430d1e8SMatthew Wilcox __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); 1258cbcfa130SEric Biggers spin_unlock_irq(&ctx->fault_pending_wqh.lock); 125986039bd3SAndrea Arcangeli } 126086039bd3SAndrea Arcangeli 126186039bd3SAndrea Arcangeli static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, 126286039bd3SAndrea Arcangeli struct userfaultfd_wake_range *range) 126386039bd3SAndrea Arcangeli { 12642c5b7e1bSAndrea Arcangeli unsigned seq; 12652c5b7e1bSAndrea Arcangeli bool need_wakeup; 12662c5b7e1bSAndrea Arcangeli 126786039bd3SAndrea Arcangeli /* 126886039bd3SAndrea Arcangeli * To be sure waitqueue_active() is not reordered by the CPU 126986039bd3SAndrea Arcangeli * before the pagetable update, use an explicit SMP memory 12703e4e28c5SMichel Lespinasse * barrier here. PT lock release or mmap_read_unlock(mm) still 127186039bd3SAndrea Arcangeli * have release semantics that can allow the 127286039bd3SAndrea Arcangeli * waitqueue_active() to be reordered before the pte update. 127386039bd3SAndrea Arcangeli */ 127486039bd3SAndrea Arcangeli smp_mb(); 127586039bd3SAndrea Arcangeli 127686039bd3SAndrea Arcangeli /* 127786039bd3SAndrea Arcangeli * Use waitqueue_active because it's very frequent to 127886039bd3SAndrea Arcangeli * change the address space atomically even if there are no 127986039bd3SAndrea Arcangeli * userfaults yet. So we take the spinlock only when we're 128086039bd3SAndrea Arcangeli * sure we've userfaults to wake. 
128186039bd3SAndrea Arcangeli */ 12822c5b7e1bSAndrea Arcangeli do { 12832c5b7e1bSAndrea Arcangeli seq = read_seqcount_begin(&ctx->refile_seq); 12842c5b7e1bSAndrea Arcangeli need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || 12852c5b7e1bSAndrea Arcangeli waitqueue_active(&ctx->fault_wqh); 12862c5b7e1bSAndrea Arcangeli cond_resched(); 12872c5b7e1bSAndrea Arcangeli } while (read_seqcount_retry(&ctx->refile_seq, seq)); 12882c5b7e1bSAndrea Arcangeli if (need_wakeup) 128986039bd3SAndrea Arcangeli __wake_userfault(ctx, range); 129086039bd3SAndrea Arcangeli } 129186039bd3SAndrea Arcangeli 129286039bd3SAndrea Arcangeli static __always_inline int validate_range(struct mm_struct *mm, 1293e71e2aceSPeter Collingbourne __u64 start, __u64 len) 129486039bd3SAndrea Arcangeli { 129586039bd3SAndrea Arcangeli __u64 task_size = mm->task_size; 129686039bd3SAndrea Arcangeli 1297e71e2aceSPeter Collingbourne if (start & ~PAGE_MASK) 129886039bd3SAndrea Arcangeli return -EINVAL; 129986039bd3SAndrea Arcangeli if (len & ~PAGE_MASK) 130086039bd3SAndrea Arcangeli return -EINVAL; 130186039bd3SAndrea Arcangeli if (!len) 130286039bd3SAndrea Arcangeli return -EINVAL; 1303e71e2aceSPeter Collingbourne if (start < mmap_min_addr) 130486039bd3SAndrea Arcangeli return -EINVAL; 1305e71e2aceSPeter Collingbourne if (start >= task_size) 130686039bd3SAndrea Arcangeli return -EINVAL; 1307e71e2aceSPeter Collingbourne if (len > task_size - start) 130886039bd3SAndrea Arcangeli return -EINVAL; 130986039bd3SAndrea Arcangeli return 0; 131086039bd3SAndrea Arcangeli } 131186039bd3SAndrea Arcangeli 131286039bd3SAndrea Arcangeli static int userfaultfd_register(struct userfaultfd_ctx *ctx, 131386039bd3SAndrea Arcangeli unsigned long arg) 131486039bd3SAndrea Arcangeli { 131586039bd3SAndrea Arcangeli struct mm_struct *mm = ctx->mm; 131686039bd3SAndrea Arcangeli struct vm_area_struct *vma, *prev, *cur; 131786039bd3SAndrea Arcangeli int ret; 131886039bd3SAndrea Arcangeli struct uffdio_register uffdio_register; 131986039bd3SAndrea Arcangeli struct uffdio_register __user *user_uffdio_register; 132086039bd3SAndrea Arcangeli unsigned long vm_flags, new_flags; 132186039bd3SAndrea Arcangeli bool found; 1322ce53e8e6SMike Rapoport bool basic_ioctls; 132386039bd3SAndrea Arcangeli unsigned long start, end, vma_end; 132411a9b902SLiam R. 
Howlett struct vma_iterator vmi; 13255543d3c4SPeter Xu pgoff_t pgoff; 132686039bd3SAndrea Arcangeli 132786039bd3SAndrea Arcangeli user_uffdio_register = (struct uffdio_register __user *) arg; 132886039bd3SAndrea Arcangeli 132986039bd3SAndrea Arcangeli ret = -EFAULT; 133086039bd3SAndrea Arcangeli if (copy_from_user(&uffdio_register, user_uffdio_register, 133186039bd3SAndrea Arcangeli sizeof(uffdio_register)-sizeof(__u64))) 133286039bd3SAndrea Arcangeli goto out; 133386039bd3SAndrea Arcangeli 133486039bd3SAndrea Arcangeli ret = -EINVAL; 133586039bd3SAndrea Arcangeli if (!uffdio_register.mode) 133686039bd3SAndrea Arcangeli goto out; 13377677f7fdSAxel Rasmussen if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) 133886039bd3SAndrea Arcangeli goto out; 133986039bd3SAndrea Arcangeli vm_flags = 0; 134086039bd3SAndrea Arcangeli if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) 134186039bd3SAndrea Arcangeli vm_flags |= VM_UFFD_MISSING; 134200b151f2SPeter Xu if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { 134300b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 134400b151f2SPeter Xu goto out; 134500b151f2SPeter Xu #endif 134686039bd3SAndrea Arcangeli vm_flags |= VM_UFFD_WP; 134700b151f2SPeter Xu } 13487677f7fdSAxel Rasmussen if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { 13497677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 13507677f7fdSAxel Rasmussen goto out; 13517677f7fdSAxel Rasmussen #endif 13527677f7fdSAxel Rasmussen vm_flags |= VM_UFFD_MINOR; 13537677f7fdSAxel Rasmussen } 135486039bd3SAndrea Arcangeli 1355e71e2aceSPeter Collingbourne ret = validate_range(mm, uffdio_register.range.start, 135686039bd3SAndrea Arcangeli uffdio_register.range.len); 135786039bd3SAndrea Arcangeli if (ret) 135886039bd3SAndrea Arcangeli goto out; 135986039bd3SAndrea Arcangeli 136086039bd3SAndrea Arcangeli start = uffdio_register.range.start; 136186039bd3SAndrea Arcangeli end = start + uffdio_register.range.len; 136286039bd3SAndrea Arcangeli 1363d2005e3fSOleg Nesterov ret = -ENOMEM; 1364d2005e3fSOleg Nesterov if (!mmget_not_zero(mm)) 1365d2005e3fSOleg Nesterov goto out; 1366d2005e3fSOleg Nesterov 136786039bd3SAndrea Arcangeli ret = -EINVAL; 136811a9b902SLiam R. Howlett mmap_write_lock(mm); 136911a9b902SLiam R. Howlett vma_iter_init(&vmi, mm, start); 137011a9b902SLiam R. Howlett vma = vma_find(&vmi, end); 137111a9b902SLiam R. Howlett if (!vma) 137286039bd3SAndrea Arcangeli goto out_unlock; 137386039bd3SAndrea Arcangeli 137486039bd3SAndrea Arcangeli /* 1375cab350afSMike Kravetz * If the first vma contains huge pages, make sure start address 1376cab350afSMike Kravetz * is aligned to huge page size. 1377cab350afSMike Kravetz */ 1378cab350afSMike Kravetz if (is_vm_hugetlb_page(vma)) { 1379cab350afSMike Kravetz unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1380cab350afSMike Kravetz 1381cab350afSMike Kravetz if (start & (vma_hpagesize - 1)) 1382cab350afSMike Kravetz goto out_unlock; 1383cab350afSMike Kravetz } 1384cab350afSMike Kravetz 1385cab350afSMike Kravetz /* 138686039bd3SAndrea Arcangeli * Search for not compatible vmas. 138786039bd3SAndrea Arcangeli */ 138886039bd3SAndrea Arcangeli found = false; 1389ce53e8e6SMike Rapoport basic_ioctls = false; 139011a9b902SLiam R. Howlett cur = vma; 139111a9b902SLiam R. 
Howlett do { 139286039bd3SAndrea Arcangeli cond_resched(); 139386039bd3SAndrea Arcangeli 139486039bd3SAndrea Arcangeli BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 13957677f7fdSAxel Rasmussen !!(cur->vm_flags & __VM_UFFD_FLAGS)); 139686039bd3SAndrea Arcangeli 139786039bd3SAndrea Arcangeli /* check not compatible vmas */ 139886039bd3SAndrea Arcangeli ret = -EINVAL; 139963b2d417SAndrea Arcangeli if (!vma_can_userfault(cur, vm_flags)) 140086039bd3SAndrea Arcangeli goto out_unlock; 140129ec9066SAndrea Arcangeli 140229ec9066SAndrea Arcangeli /* 140329ec9066SAndrea Arcangeli * UFFDIO_COPY will fill file holes even without 140429ec9066SAndrea Arcangeli * PROT_WRITE. This check enforces that if this is a 140529ec9066SAndrea Arcangeli * MAP_SHARED, the process has write permission to the backing 140629ec9066SAndrea Arcangeli * file. If VM_MAYWRITE is set it also enforces that on a 140729ec9066SAndrea Arcangeli * MAP_SHARED vma: there is no F_WRITE_SEAL and no further 140829ec9066SAndrea Arcangeli * F_WRITE_SEAL can be taken until the vma is destroyed. 140929ec9066SAndrea Arcangeli */ 141029ec9066SAndrea Arcangeli ret = -EPERM; 141129ec9066SAndrea Arcangeli if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) 141229ec9066SAndrea Arcangeli goto out_unlock; 141329ec9066SAndrea Arcangeli 1414cab350afSMike Kravetz /* 1415cab350afSMike Kravetz * If this vma contains ending address, and huge pages 1416cab350afSMike Kravetz * check alignment. 1417cab350afSMike Kravetz */ 1418cab350afSMike Kravetz if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && 1419cab350afSMike Kravetz end > cur->vm_start) { 1420cab350afSMike Kravetz unsigned long vma_hpagesize = vma_kernel_pagesize(cur); 1421cab350afSMike Kravetz 1422cab350afSMike Kravetz ret = -EINVAL; 1423cab350afSMike Kravetz 1424cab350afSMike Kravetz if (end & (vma_hpagesize - 1)) 1425cab350afSMike Kravetz goto out_unlock; 1426cab350afSMike Kravetz } 142763b2d417SAndrea Arcangeli if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) 142863b2d417SAndrea Arcangeli goto out_unlock; 142986039bd3SAndrea Arcangeli 143086039bd3SAndrea Arcangeli /* 143186039bd3SAndrea Arcangeli * Check that this vma isn't already owned by a 143286039bd3SAndrea Arcangeli * different userfaultfd. We can't allow more than one 143386039bd3SAndrea Arcangeli * userfaultfd to own a single vma simultaneously or we 143486039bd3SAndrea Arcangeli * wouldn't know which one to deliver the userfaults to. 143586039bd3SAndrea Arcangeli */ 143686039bd3SAndrea Arcangeli ret = -EBUSY; 143786039bd3SAndrea Arcangeli if (cur->vm_userfaultfd_ctx.ctx && 143886039bd3SAndrea Arcangeli cur->vm_userfaultfd_ctx.ctx != ctx) 143986039bd3SAndrea Arcangeli goto out_unlock; 144086039bd3SAndrea Arcangeli 1441cab350afSMike Kravetz /* 1442cab350afSMike Kravetz * Note vmas containing huge pages 1443cab350afSMike Kravetz */ 1444ce53e8e6SMike Rapoport if (is_vm_hugetlb_page(cur)) 1445ce53e8e6SMike Rapoport basic_ioctls = true; 1446cab350afSMike Kravetz 144786039bd3SAndrea Arcangeli found = true; 144811a9b902SLiam R. Howlett } for_each_vma_range(vmi, cur, end); 144986039bd3SAndrea Arcangeli BUG_ON(!found); 145086039bd3SAndrea Arcangeli 145111a9b902SLiam R. Howlett vma_iter_set(&vmi, start); 145211a9b902SLiam R. Howlett prev = vma_prev(&vmi); 1453270aa010SPeter Xu if (vma->vm_start < start) 1454270aa010SPeter Xu prev = vma; 145586039bd3SAndrea Arcangeli 145686039bd3SAndrea Arcangeli ret = 0; 145711a9b902SLiam R. 
Howlett for_each_vma_range(vmi, vma, end) { 145886039bd3SAndrea Arcangeli cond_resched(); 145986039bd3SAndrea Arcangeli 146063b2d417SAndrea Arcangeli BUG_ON(!vma_can_userfault(vma, vm_flags)); 146186039bd3SAndrea Arcangeli BUG_ON(vma->vm_userfaultfd_ctx.ctx && 146286039bd3SAndrea Arcangeli vma->vm_userfaultfd_ctx.ctx != ctx); 146329ec9066SAndrea Arcangeli WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 146486039bd3SAndrea Arcangeli 146586039bd3SAndrea Arcangeli /* 146686039bd3SAndrea Arcangeli * Nothing to do: this vma is already registered into this 146786039bd3SAndrea Arcangeli * userfaultfd and with the right tracking mode too. 146886039bd3SAndrea Arcangeli */ 146986039bd3SAndrea Arcangeli if (vma->vm_userfaultfd_ctx.ctx == ctx && 147086039bd3SAndrea Arcangeli (vma->vm_flags & vm_flags) == vm_flags) 147186039bd3SAndrea Arcangeli goto skip; 147286039bd3SAndrea Arcangeli 147386039bd3SAndrea Arcangeli if (vma->vm_start > start) 147486039bd3SAndrea Arcangeli start = vma->vm_start; 147586039bd3SAndrea Arcangeli vma_end = min(end, vma->vm_end); 147686039bd3SAndrea Arcangeli 14777677f7fdSAxel Rasmussen new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; 14785543d3c4SPeter Xu pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 14799760ebffSLiam R. Howlett prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags, 14805543d3c4SPeter Xu vma->anon_vma, vma->vm_file, pgoff, 148186039bd3SAndrea Arcangeli vma_policy(vma), 14829a10064fSColin Cross ((struct vm_userfaultfd_ctx){ ctx }), 14835c26f6acSSuren Baghdasaryan anon_vma_name(vma)); 148486039bd3SAndrea Arcangeli if (prev) { 148569dbe6daSLiam R. Howlett /* vma_merge() invalidated the mas */ 148686039bd3SAndrea Arcangeli vma = prev; 148786039bd3SAndrea Arcangeli goto next; 148886039bd3SAndrea Arcangeli } 148986039bd3SAndrea Arcangeli if (vma->vm_start < start) { 14909760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, start, 1); 149186039bd3SAndrea Arcangeli if (ret) 149286039bd3SAndrea Arcangeli break; 149386039bd3SAndrea Arcangeli } 149486039bd3SAndrea Arcangeli if (vma->vm_end > end) { 14959760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, end, 0); 149686039bd3SAndrea Arcangeli if (ret) 149786039bd3SAndrea Arcangeli break; 149886039bd3SAndrea Arcangeli } 149986039bd3SAndrea Arcangeli next: 150086039bd3SAndrea Arcangeli /* 150186039bd3SAndrea Arcangeli * In the vma_merge() successful mprotect-like case 8: 150286039bd3SAndrea Arcangeli * the next vma was merged into the current one and 150386039bd3SAndrea Arcangeli * the current one has not been updated yet. 150486039bd3SAndrea Arcangeli */ 150551d3d5ebSDavid Hildenbrand userfaultfd_set_vm_flags(vma, new_flags); 150686039bd3SAndrea Arcangeli vma->vm_userfaultfd_ctx.ctx = ctx; 150786039bd3SAndrea Arcangeli 15086dfeaff9SPeter Xu if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) 15096dfeaff9SPeter Xu hugetlb_unshare_all_pmds(vma); 15106dfeaff9SPeter Xu 151186039bd3SAndrea Arcangeli skip: 151286039bd3SAndrea Arcangeli prev = vma; 151386039bd3SAndrea Arcangeli start = vma->vm_end; 151411a9b902SLiam R. Howlett } 151511a9b902SLiam R. Howlett 151686039bd3SAndrea Arcangeli out_unlock: 1517d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1518d2005e3fSOleg Nesterov mmput(mm); 151986039bd3SAndrea Arcangeli if (!ret) { 152014819305SPeter Xu __u64 ioctls_out; 152114819305SPeter Xu 152214819305SPeter Xu ioctls_out = basic_ioctls ? 
UFFD_API_RANGE_IOCTLS_BASIC : 152314819305SPeter Xu UFFD_API_RANGE_IOCTLS; 152414819305SPeter Xu 152514819305SPeter Xu /* 152614819305SPeter Xu * Declare the WP ioctl only if the WP mode is 152714819305SPeter Xu * specified and all checks passed with the range 152814819305SPeter Xu */ 152914819305SPeter Xu if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) 153014819305SPeter Xu ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); 153114819305SPeter Xu 1532f6191471SAxel Rasmussen /* CONTINUE ioctl is only supported for MINOR ranges. */ 1533f6191471SAxel Rasmussen if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) 1534f6191471SAxel Rasmussen ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); 1535f6191471SAxel Rasmussen 153686039bd3SAndrea Arcangeli /* 153786039bd3SAndrea Arcangeli * Now that we scanned all vmas we can already tell 153886039bd3SAndrea Arcangeli * userland which ioctls methods are guaranteed to 153986039bd3SAndrea Arcangeli * succeed on this range. 154086039bd3SAndrea Arcangeli */ 154114819305SPeter Xu if (put_user(ioctls_out, &user_uffdio_register->ioctls)) 154286039bd3SAndrea Arcangeli ret = -EFAULT; 154386039bd3SAndrea Arcangeli } 154486039bd3SAndrea Arcangeli out: 154586039bd3SAndrea Arcangeli return ret; 154686039bd3SAndrea Arcangeli } 154786039bd3SAndrea Arcangeli 154886039bd3SAndrea Arcangeli static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, 154986039bd3SAndrea Arcangeli unsigned long arg) 155086039bd3SAndrea Arcangeli { 155186039bd3SAndrea Arcangeli struct mm_struct *mm = ctx->mm; 155286039bd3SAndrea Arcangeli struct vm_area_struct *vma, *prev, *cur; 155386039bd3SAndrea Arcangeli int ret; 155486039bd3SAndrea Arcangeli struct uffdio_range uffdio_unregister; 155586039bd3SAndrea Arcangeli unsigned long new_flags; 155686039bd3SAndrea Arcangeli bool found; 155786039bd3SAndrea Arcangeli unsigned long start, end, vma_end; 155886039bd3SAndrea Arcangeli const void __user *buf = (void __user *)arg; 155911a9b902SLiam R. Howlett struct vma_iterator vmi; 15605543d3c4SPeter Xu pgoff_t pgoff; 156186039bd3SAndrea Arcangeli 156286039bd3SAndrea Arcangeli ret = -EFAULT; 156386039bd3SAndrea Arcangeli if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) 156486039bd3SAndrea Arcangeli goto out; 156586039bd3SAndrea Arcangeli 1566e71e2aceSPeter Collingbourne ret = validate_range(mm, uffdio_unregister.start, 156786039bd3SAndrea Arcangeli uffdio_unregister.len); 156886039bd3SAndrea Arcangeli if (ret) 156986039bd3SAndrea Arcangeli goto out; 157086039bd3SAndrea Arcangeli 157186039bd3SAndrea Arcangeli start = uffdio_unregister.start; 157286039bd3SAndrea Arcangeli end = start + uffdio_unregister.len; 157386039bd3SAndrea Arcangeli 1574d2005e3fSOleg Nesterov ret = -ENOMEM; 1575d2005e3fSOleg Nesterov if (!mmget_not_zero(mm)) 1576d2005e3fSOleg Nesterov goto out; 1577d2005e3fSOleg Nesterov 1578d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 157986039bd3SAndrea Arcangeli ret = -EINVAL; 158011a9b902SLiam R. Howlett vma_iter_init(&vmi, mm, start); 158111a9b902SLiam R. Howlett vma = vma_find(&vmi, end); 158211a9b902SLiam R. Howlett if (!vma) 158386039bd3SAndrea Arcangeli goto out_unlock; 158486039bd3SAndrea Arcangeli 158586039bd3SAndrea Arcangeli /* 1586cab350afSMike Kravetz * If the first vma contains huge pages, make sure start address 1587cab350afSMike Kravetz * is aligned to huge page size. 
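	 *
	 * Userspace sketch (illustrative, assuming a hugetlbfs-backed
	 * mapping at addr with 2MB huge pages):
	 *
	 *	const unsigned long hpage = 2UL << 20;
	 *	struct uffdio_range r = {
	 *		.start = addr,
	 *		.len = (len + hpage - 1) & ~(hpage - 1),
	 *	};
	 *
	 *	ioctl(uffd, UFFDIO_UNREGISTER, &r);
	 *
	 * with addr itself hpage-aligned, or the ioctl fails with
	 * EINVAL as enforced below.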
1588cab350afSMike Kravetz 	 */
1589cab350afSMike Kravetz 	if (is_vm_hugetlb_page(vma)) {
1590cab350afSMike Kravetz 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1591cab350afSMike Kravetz 
1592cab350afSMike Kravetz 		if (start & (vma_hpagesize - 1))
1593cab350afSMike Kravetz 			goto out_unlock;
1594cab350afSMike Kravetz 	}
1595cab350afSMike Kravetz 
1596cab350afSMike Kravetz 	/*
159786039bd3SAndrea Arcangeli 	 * Search for incompatible vmas.
159886039bd3SAndrea Arcangeli 	 */
159986039bd3SAndrea Arcangeli 	found = false;
160011a9b902SLiam R. Howlett 	cur = vma;
160111a9b902SLiam R. Howlett 	do {
160286039bd3SAndrea Arcangeli 		cond_resched();
160386039bd3SAndrea Arcangeli 
160486039bd3SAndrea Arcangeli 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
16057677f7fdSAxel Rasmussen 		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
160686039bd3SAndrea Arcangeli 
160786039bd3SAndrea Arcangeli 		/*
160886039bd3SAndrea Arcangeli 		 * Check for incompatible vmas. Not strictly required
160986039bd3SAndrea Arcangeli 		 * here, as incompatible vmas cannot have a
161086039bd3SAndrea Arcangeli 		 * userfaultfd_ctx registered on them, but this
161186039bd3SAndrea Arcangeli 		 * provides stricter behavior so unregistration
161286039bd3SAndrea Arcangeli 		 * errors are noticed.
161386039bd3SAndrea Arcangeli 		 */
161463b2d417SAndrea Arcangeli 		if (!vma_can_userfault(cur, cur->vm_flags))
161586039bd3SAndrea Arcangeli 			goto out_unlock;
161686039bd3SAndrea Arcangeli 
161786039bd3SAndrea Arcangeli 		found = true;
161811a9b902SLiam R. Howlett 	} for_each_vma_range(vmi, cur, end);
161986039bd3SAndrea Arcangeli 	BUG_ON(!found);
162086039bd3SAndrea Arcangeli 
162111a9b902SLiam R. Howlett 	vma_iter_set(&vmi, start);
162211a9b902SLiam R. Howlett 	prev = vma_prev(&vmi);
1623270aa010SPeter Xu 	if (vma->vm_start < start)
1624270aa010SPeter Xu 		prev = vma;
1625270aa010SPeter Xu 
162686039bd3SAndrea Arcangeli 	ret = 0;
162711a9b902SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
162886039bd3SAndrea Arcangeli 		cond_resched();
162986039bd3SAndrea Arcangeli 
163063b2d417SAndrea Arcangeli 		BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
163186039bd3SAndrea Arcangeli 
163286039bd3SAndrea Arcangeli 		/*
163386039bd3SAndrea Arcangeli 		 * Nothing to do: this vma is not registered with any
163486039bd3SAndrea Arcangeli 		 * userfaultfd, so there is nothing to unregister.
163586039bd3SAndrea Arcangeli 		 */
163686039bd3SAndrea Arcangeli 		if (!vma->vm_userfaultfd_ctx.ctx)
163786039bd3SAndrea Arcangeli 			goto skip;
163886039bd3SAndrea Arcangeli 
163901e881f5SAndrea Arcangeli 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
164001e881f5SAndrea Arcangeli 
164186039bd3SAndrea Arcangeli 		if (vma->vm_start > start)
164286039bd3SAndrea Arcangeli 			start = vma->vm_start;
164386039bd3SAndrea Arcangeli 		vma_end = min(end, vma->vm_end);
164486039bd3SAndrea Arcangeli 
164509fa5296SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
164609fa5296SAndrea Arcangeli 			/*
164709fa5296SAndrea Arcangeli 			 * Wake any concurrent pending userfault while
164809fa5296SAndrea Arcangeli 			 * we unregister, so they will not hang
164909fa5296SAndrea Arcangeli 			 * permanently; this also saves userland from
165009fa5296SAndrea Arcangeli 			 * having to call UFFDIO_WAKE explicitly.
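			 *
			 * For ranges that remain registered, userspace
			 * issues the same wakeup by hand (illustrative
			 * sketch):
			 *
			 *	struct uffdio_range r = {
			 *		.start = addr, .len = len,
			 *	};
			 *
			 *	ioctl(uffd, UFFDIO_WAKE, &r);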
165109fa5296SAndrea Arcangeli */ 165209fa5296SAndrea Arcangeli struct userfaultfd_wake_range range; 165309fa5296SAndrea Arcangeli range.start = start; 165409fa5296SAndrea Arcangeli range.len = vma_end - start; 165509fa5296SAndrea Arcangeli wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); 165609fa5296SAndrea Arcangeli } 165709fa5296SAndrea Arcangeli 1658f369b07cSPeter Xu /* Reset ptes for the whole vma range if wr-protected */ 1659f369b07cSPeter Xu if (userfaultfd_wp(vma)) 166061c50040SAxel Rasmussen uffd_wp_range(vma, start, vma_end - start, false); 1661f369b07cSPeter Xu 16627677f7fdSAxel Rasmussen new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; 16635543d3c4SPeter Xu pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 16649760ebffSLiam R. Howlett prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags, 16655543d3c4SPeter Xu vma->anon_vma, vma->vm_file, pgoff, 166686039bd3SAndrea Arcangeli vma_policy(vma), 16675c26f6acSSuren Baghdasaryan NULL_VM_UFFD_CTX, anon_vma_name(vma)); 166886039bd3SAndrea Arcangeli if (prev) { 166986039bd3SAndrea Arcangeli vma = prev; 167086039bd3SAndrea Arcangeli goto next; 167186039bd3SAndrea Arcangeli } 167286039bd3SAndrea Arcangeli if (vma->vm_start < start) { 16739760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, start, 1); 167486039bd3SAndrea Arcangeli if (ret) 167586039bd3SAndrea Arcangeli break; 167686039bd3SAndrea Arcangeli } 167786039bd3SAndrea Arcangeli if (vma->vm_end > end) { 16789760ebffSLiam R. Howlett ret = split_vma(&vmi, vma, end, 0); 167986039bd3SAndrea Arcangeli if (ret) 168086039bd3SAndrea Arcangeli break; 168186039bd3SAndrea Arcangeli } 168286039bd3SAndrea Arcangeli next: 168386039bd3SAndrea Arcangeli /* 168486039bd3SAndrea Arcangeli * In the vma_merge() successful mprotect-like case 8: 168586039bd3SAndrea Arcangeli * the next vma was merged into the current one and 168686039bd3SAndrea Arcangeli * the current one has not been updated yet. 168786039bd3SAndrea Arcangeli */ 168851d3d5ebSDavid Hildenbrand userfaultfd_set_vm_flags(vma, new_flags); 168986039bd3SAndrea Arcangeli vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 169086039bd3SAndrea Arcangeli 169186039bd3SAndrea Arcangeli skip: 169286039bd3SAndrea Arcangeli prev = vma; 169386039bd3SAndrea Arcangeli start = vma->vm_end; 169411a9b902SLiam R. Howlett } 169511a9b902SLiam R. Howlett 169686039bd3SAndrea Arcangeli out_unlock: 1697d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1698d2005e3fSOleg Nesterov mmput(mm); 169986039bd3SAndrea Arcangeli out: 170086039bd3SAndrea Arcangeli return ret; 170186039bd3SAndrea Arcangeli } 170286039bd3SAndrea Arcangeli 170386039bd3SAndrea Arcangeli /* 1704ba85c702SAndrea Arcangeli * userfaultfd_wake may be used in combination with the 1705ba85c702SAndrea Arcangeli * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches. 
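 *
 * Illustrative userspace sketch of such a batch: resolve many pages
 * with the wakeup suppressed, then wake the whole range once (base,
 * src[], npages and page_size are assumed to be set up by the
 * caller):
 *
 *	for (i = 0; i < npages; i++) {
 *		struct uffdio_copy c = {
 *			.dst = base + i * page_size,
 *			.src = (unsigned long)src[i],
 *			.len = page_size,
 *			.mode = UFFDIO_COPY_MODE_DONTWAKE,
 *		};
 *		ioctl(uffd, UFFDIO_COPY, &c);
 *	}
 *
 *	struct uffdio_range r = { .start = base, .len = npages * page_size };
 *	ioctl(uffd, UFFDIO_WAKE, &r);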
170686039bd3SAndrea Arcangeli */ 170786039bd3SAndrea Arcangeli static int userfaultfd_wake(struct userfaultfd_ctx *ctx, 170886039bd3SAndrea Arcangeli unsigned long arg) 170986039bd3SAndrea Arcangeli { 171086039bd3SAndrea Arcangeli int ret; 171186039bd3SAndrea Arcangeli struct uffdio_range uffdio_wake; 171286039bd3SAndrea Arcangeli struct userfaultfd_wake_range range; 171386039bd3SAndrea Arcangeli const void __user *buf = (void __user *)arg; 171486039bd3SAndrea Arcangeli 171586039bd3SAndrea Arcangeli ret = -EFAULT; 171686039bd3SAndrea Arcangeli if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) 171786039bd3SAndrea Arcangeli goto out; 171886039bd3SAndrea Arcangeli 1719e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); 172086039bd3SAndrea Arcangeli if (ret) 172186039bd3SAndrea Arcangeli goto out; 172286039bd3SAndrea Arcangeli 172386039bd3SAndrea Arcangeli range.start = uffdio_wake.start; 172486039bd3SAndrea Arcangeli range.len = uffdio_wake.len; 172586039bd3SAndrea Arcangeli 172686039bd3SAndrea Arcangeli /* 172786039bd3SAndrea Arcangeli * len == 0 means wake all and we don't want to wake all here, 172886039bd3SAndrea Arcangeli * so check it again to be sure. 172986039bd3SAndrea Arcangeli */ 173086039bd3SAndrea Arcangeli VM_BUG_ON(!range.len); 173186039bd3SAndrea Arcangeli 173286039bd3SAndrea Arcangeli wake_userfault(ctx, &range); 173386039bd3SAndrea Arcangeli ret = 0; 173486039bd3SAndrea Arcangeli 173586039bd3SAndrea Arcangeli out: 173686039bd3SAndrea Arcangeli return ret; 173786039bd3SAndrea Arcangeli } 173886039bd3SAndrea Arcangeli 1739ad465caeSAndrea Arcangeli static int userfaultfd_copy(struct userfaultfd_ctx *ctx, 1740ad465caeSAndrea Arcangeli unsigned long arg) 1741ad465caeSAndrea Arcangeli { 1742ad465caeSAndrea Arcangeli __s64 ret; 1743ad465caeSAndrea Arcangeli struct uffdio_copy uffdio_copy; 1744ad465caeSAndrea Arcangeli struct uffdio_copy __user *user_uffdio_copy; 1745ad465caeSAndrea Arcangeli struct userfaultfd_wake_range range; 1746d9712937SAxel Rasmussen uffd_flags_t flags = 0; 1747ad465caeSAndrea Arcangeli 1748ad465caeSAndrea Arcangeli user_uffdio_copy = (struct uffdio_copy __user *) arg; 1749ad465caeSAndrea Arcangeli 1750df2cc96eSMike Rapoport ret = -EAGAIN; 1751a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing)) 1752df2cc96eSMike Rapoport goto out; 1753df2cc96eSMike Rapoport 1754ad465caeSAndrea Arcangeli ret = -EFAULT; 1755ad465caeSAndrea Arcangeli if (copy_from_user(&uffdio_copy, user_uffdio_copy, 1756ad465caeSAndrea Arcangeli /* don't copy "copy" last field */ 1757ad465caeSAndrea Arcangeli sizeof(uffdio_copy)-sizeof(__s64))) 1758ad465caeSAndrea Arcangeli goto out; 1759ad465caeSAndrea Arcangeli 1760e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); 1761ad465caeSAndrea Arcangeli if (ret) 1762ad465caeSAndrea Arcangeli goto out; 1763ad465caeSAndrea Arcangeli /* 1764ad465caeSAndrea Arcangeli * double check for wraparound just in case. copy_from_user() 1765ad465caeSAndrea Arcangeli * will later check uffdio_copy.src + uffdio_copy.len to fit 1766ad465caeSAndrea Arcangeli * in the userland range. 
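	 *
	 * The struct also carries the result back to userspace: the
	 * kernel writes the number of bytes handled (or a negative
	 * error) into uffdio_copy.copy via the put_user() below. An
	 * illustrative sketch of the retry pattern (dst, src and len
	 * are assumptions of the caller):
	 *
	 *	struct uffdio_copy c = { .dst = dst, .src = src, .len = len };
	 *
	 *	if (ioctl(uffd, UFFDIO_COPY, &c) == -1 && errno == EAGAIN &&
	 *	    c.copy > 0) {
	 *		... retry with dst + c.copy and src + c.copy ...
	 *	}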
1767ad465caeSAndrea Arcangeli */ 1768ad465caeSAndrea Arcangeli ret = -EINVAL; 1769ad465caeSAndrea Arcangeli if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) 1770ad465caeSAndrea Arcangeli goto out; 177172981e0eSAndrea Arcangeli if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) 1772ad465caeSAndrea Arcangeli goto out; 1773d9712937SAxel Rasmussen if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP) 1774d9712937SAxel Rasmussen flags |= MFILL_ATOMIC_WP; 1775d2005e3fSOleg Nesterov if (mmget_not_zero(ctx->mm)) { 1776a734991cSAxel Rasmussen ret = mfill_atomic_copy(ctx->mm, uffdio_copy.dst, uffdio_copy.src, 177772981e0eSAndrea Arcangeli uffdio_copy.len, &ctx->mmap_changing, 1778d9712937SAxel Rasmussen flags); 1779d2005e3fSOleg Nesterov mmput(ctx->mm); 178096333187SMike Rapoport } else { 1781e86b298bSMike Rapoport return -ESRCH; 1782d2005e3fSOleg Nesterov } 1783ad465caeSAndrea Arcangeli if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1784ad465caeSAndrea Arcangeli return -EFAULT; 1785ad465caeSAndrea Arcangeli if (ret < 0) 1786ad465caeSAndrea Arcangeli goto out; 1787ad465caeSAndrea Arcangeli BUG_ON(!ret); 1788ad465caeSAndrea Arcangeli /* len == 0 would wake all */ 1789ad465caeSAndrea Arcangeli range.len = ret; 1790ad465caeSAndrea Arcangeli if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { 1791ad465caeSAndrea Arcangeli range.start = uffdio_copy.dst; 1792ad465caeSAndrea Arcangeli wake_userfault(ctx, &range); 1793ad465caeSAndrea Arcangeli } 1794ad465caeSAndrea Arcangeli ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; 1795ad465caeSAndrea Arcangeli out: 1796ad465caeSAndrea Arcangeli return ret; 1797ad465caeSAndrea Arcangeli } 1798ad465caeSAndrea Arcangeli 1799ad465caeSAndrea Arcangeli static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, 1800ad465caeSAndrea Arcangeli unsigned long arg) 1801ad465caeSAndrea Arcangeli { 1802ad465caeSAndrea Arcangeli __s64 ret; 1803ad465caeSAndrea Arcangeli struct uffdio_zeropage uffdio_zeropage; 1804ad465caeSAndrea Arcangeli struct uffdio_zeropage __user *user_uffdio_zeropage; 1805ad465caeSAndrea Arcangeli struct userfaultfd_wake_range range; 1806ad465caeSAndrea Arcangeli 1807ad465caeSAndrea Arcangeli user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; 1808ad465caeSAndrea Arcangeli 1809df2cc96eSMike Rapoport ret = -EAGAIN; 1810a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing)) 1811df2cc96eSMike Rapoport goto out; 1812df2cc96eSMike Rapoport 1813ad465caeSAndrea Arcangeli ret = -EFAULT; 1814ad465caeSAndrea Arcangeli if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, 1815ad465caeSAndrea Arcangeli /* don't copy "zeropage" last field */ 1816ad465caeSAndrea Arcangeli sizeof(uffdio_zeropage)-sizeof(__s64))) 1817ad465caeSAndrea Arcangeli goto out; 1818ad465caeSAndrea Arcangeli 1819e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_zeropage.range.start, 1820ad465caeSAndrea Arcangeli uffdio_zeropage.range.len); 1821ad465caeSAndrea Arcangeli if (ret) 1822ad465caeSAndrea Arcangeli goto out; 1823ad465caeSAndrea Arcangeli ret = -EINVAL; 1824ad465caeSAndrea Arcangeli if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) 1825ad465caeSAndrea Arcangeli goto out; 1826ad465caeSAndrea Arcangeli 1827d2005e3fSOleg Nesterov if (mmget_not_zero(ctx->mm)) { 1828a734991cSAxel Rasmussen ret = mfill_atomic_zeropage(ctx->mm, uffdio_zeropage.range.start, 1829df2cc96eSMike Rapoport uffdio_zeropage.range.len, 1830df2cc96eSMike Rapoport &ctx->mmap_changing); 1831d2005e3fSOleg Nesterov mmput(ctx->mm); 18329d95aa4bSMike 
Rapoport } else { 1833e86b298bSMike Rapoport return -ESRCH; 1834d2005e3fSOleg Nesterov } 1835ad465caeSAndrea Arcangeli if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1836ad465caeSAndrea Arcangeli return -EFAULT; 1837ad465caeSAndrea Arcangeli if (ret < 0) 1838ad465caeSAndrea Arcangeli goto out; 1839ad465caeSAndrea Arcangeli /* len == 0 would wake all */ 1840ad465caeSAndrea Arcangeli BUG_ON(!ret); 1841ad465caeSAndrea Arcangeli range.len = ret; 1842ad465caeSAndrea Arcangeli if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { 1843ad465caeSAndrea Arcangeli range.start = uffdio_zeropage.range.start; 1844ad465caeSAndrea Arcangeli wake_userfault(ctx, &range); 1845ad465caeSAndrea Arcangeli } 1846ad465caeSAndrea Arcangeli ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN; 1847ad465caeSAndrea Arcangeli out: 1848ad465caeSAndrea Arcangeli return ret; 1849ad465caeSAndrea Arcangeli } 1850ad465caeSAndrea Arcangeli 185163b2d417SAndrea Arcangeli static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, 185263b2d417SAndrea Arcangeli unsigned long arg) 185363b2d417SAndrea Arcangeli { 185463b2d417SAndrea Arcangeli int ret; 185563b2d417SAndrea Arcangeli struct uffdio_writeprotect uffdio_wp; 185663b2d417SAndrea Arcangeli struct uffdio_writeprotect __user *user_uffdio_wp; 185763b2d417SAndrea Arcangeli struct userfaultfd_wake_range range; 185823080e27SPeter Xu bool mode_wp, mode_dontwake; 185963b2d417SAndrea Arcangeli 1860a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing)) 186163b2d417SAndrea Arcangeli return -EAGAIN; 186263b2d417SAndrea Arcangeli 186363b2d417SAndrea Arcangeli user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; 186463b2d417SAndrea Arcangeli 186563b2d417SAndrea Arcangeli if (copy_from_user(&uffdio_wp, user_uffdio_wp, 186663b2d417SAndrea Arcangeli sizeof(struct uffdio_writeprotect))) 186763b2d417SAndrea Arcangeli return -EFAULT; 186863b2d417SAndrea Arcangeli 1869e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_wp.range.start, 187063b2d417SAndrea Arcangeli uffdio_wp.range.len); 187163b2d417SAndrea Arcangeli if (ret) 187263b2d417SAndrea Arcangeli return ret; 187363b2d417SAndrea Arcangeli 187463b2d417SAndrea Arcangeli if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | 187563b2d417SAndrea Arcangeli UFFDIO_WRITEPROTECT_MODE_WP)) 187663b2d417SAndrea Arcangeli return -EINVAL; 187723080e27SPeter Xu 187823080e27SPeter Xu mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; 187923080e27SPeter Xu mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; 188023080e27SPeter Xu 188123080e27SPeter Xu if (mode_wp && mode_dontwake) 188263b2d417SAndrea Arcangeli return -EINVAL; 188363b2d417SAndrea Arcangeli 1884cb185d5fSNadav Amit if (mmget_not_zero(ctx->mm)) { 188563b2d417SAndrea Arcangeli ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, 188623080e27SPeter Xu uffdio_wp.range.len, mode_wp, 188763b2d417SAndrea Arcangeli &ctx->mmap_changing); 1888cb185d5fSNadav Amit mmput(ctx->mm); 1889cb185d5fSNadav Amit } else { 1890cb185d5fSNadav Amit return -ESRCH; 1891cb185d5fSNadav Amit } 1892cb185d5fSNadav Amit 189363b2d417SAndrea Arcangeli if (ret) 189463b2d417SAndrea Arcangeli return ret; 189563b2d417SAndrea Arcangeli 189623080e27SPeter Xu if (!mode_wp && !mode_dontwake) { 189763b2d417SAndrea Arcangeli range.start = uffdio_wp.range.start; 189863b2d417SAndrea Arcangeli range.len = uffdio_wp.range.len; 189963b2d417SAndrea Arcangeli wake_userfault(ctx, &range); 190063b2d417SAndrea Arcangeli } 190163b2d417SAndrea Arcangeli return 
ret; 190263b2d417SAndrea Arcangeli } 190363b2d417SAndrea Arcangeli 1904f6191471SAxel Rasmussen static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) 1905f6191471SAxel Rasmussen { 1906f6191471SAxel Rasmussen __s64 ret; 1907f6191471SAxel Rasmussen struct uffdio_continue uffdio_continue; 1908f6191471SAxel Rasmussen struct uffdio_continue __user *user_uffdio_continue; 1909f6191471SAxel Rasmussen struct userfaultfd_wake_range range; 191002891844SAxel Rasmussen uffd_flags_t flags = 0; 1911f6191471SAxel Rasmussen 1912f6191471SAxel Rasmussen user_uffdio_continue = (struct uffdio_continue __user *)arg; 1913f6191471SAxel Rasmussen 1914f6191471SAxel Rasmussen ret = -EAGAIN; 1915a759a909SNadav Amit if (atomic_read(&ctx->mmap_changing)) 1916f6191471SAxel Rasmussen goto out; 1917f6191471SAxel Rasmussen 1918f6191471SAxel Rasmussen ret = -EFAULT; 1919f6191471SAxel Rasmussen if (copy_from_user(&uffdio_continue, user_uffdio_continue, 1920f6191471SAxel Rasmussen /* don't copy the output fields */ 1921f6191471SAxel Rasmussen sizeof(uffdio_continue) - (sizeof(__s64)))) 1922f6191471SAxel Rasmussen goto out; 1923f6191471SAxel Rasmussen 1924e71e2aceSPeter Collingbourne ret = validate_range(ctx->mm, uffdio_continue.range.start, 1925f6191471SAxel Rasmussen uffdio_continue.range.len); 1926f6191471SAxel Rasmussen if (ret) 1927f6191471SAxel Rasmussen goto out; 1928f6191471SAxel Rasmussen 1929f6191471SAxel Rasmussen ret = -EINVAL; 1930f6191471SAxel Rasmussen /* double check for wraparound just in case. */ 1931f6191471SAxel Rasmussen if (uffdio_continue.range.start + uffdio_continue.range.len <= 1932f6191471SAxel Rasmussen uffdio_continue.range.start) { 1933f6191471SAxel Rasmussen goto out; 1934f6191471SAxel Rasmussen } 193502891844SAxel Rasmussen if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE | 193602891844SAxel Rasmussen UFFDIO_CONTINUE_MODE_WP)) 1937f6191471SAxel Rasmussen goto out; 193802891844SAxel Rasmussen if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP) 193902891844SAxel Rasmussen flags |= MFILL_ATOMIC_WP; 1940f6191471SAxel Rasmussen 1941f6191471SAxel Rasmussen if (mmget_not_zero(ctx->mm)) { 1942a734991cSAxel Rasmussen ret = mfill_atomic_continue(ctx->mm, uffdio_continue.range.start, 1943f6191471SAxel Rasmussen uffdio_continue.range.len, 194402891844SAxel Rasmussen &ctx->mmap_changing, flags); 1945f6191471SAxel Rasmussen mmput(ctx->mm); 1946f6191471SAxel Rasmussen } else { 1947f6191471SAxel Rasmussen return -ESRCH; 1948f6191471SAxel Rasmussen } 1949f6191471SAxel Rasmussen 1950f6191471SAxel Rasmussen if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) 1951f6191471SAxel Rasmussen return -EFAULT; 1952f6191471SAxel Rasmussen if (ret < 0) 1953f6191471SAxel Rasmussen goto out; 1954f6191471SAxel Rasmussen 1955f6191471SAxel Rasmussen /* len == 0 would wake all */ 1956f6191471SAxel Rasmussen BUG_ON(!ret); 1957f6191471SAxel Rasmussen range.len = ret; 1958f6191471SAxel Rasmussen if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { 1959f6191471SAxel Rasmussen range.start = uffdio_continue.range.start; 1960f6191471SAxel Rasmussen wake_userfault(ctx, &range); 1961f6191471SAxel Rasmussen } 1962f6191471SAxel Rasmussen ret = range.len == uffdio_continue.range.len ? 
0 : -EAGAIN; 1963f6191471SAxel Rasmussen 1964f6191471SAxel Rasmussen out: 1965f6191471SAxel Rasmussen return ret; 1966f6191471SAxel Rasmussen } 1967f6191471SAxel Rasmussen 19689cd75c3cSPavel Emelyanov static inline unsigned int uffd_ctx_features(__u64 user_features) 19699cd75c3cSPavel Emelyanov { 19709cd75c3cSPavel Emelyanov /* 197122e5fe2aSNadav Amit * For the current set of features the bits just coincide. Set 197222e5fe2aSNadav Amit * UFFD_FEATURE_INITIALIZED to mark the features as enabled. 19739cd75c3cSPavel Emelyanov */ 197422e5fe2aSNadav Amit return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; 19759cd75c3cSPavel Emelyanov } 19769cd75c3cSPavel Emelyanov 197786039bd3SAndrea Arcangeli /* 197886039bd3SAndrea Arcangeli * userland asks for a certain API version and we return which bits 197986039bd3SAndrea Arcangeli * and ioctl commands are implemented in this kernel for such API 198086039bd3SAndrea Arcangeli * version or -EINVAL if unknown. 198186039bd3SAndrea Arcangeli */ 198286039bd3SAndrea Arcangeli static int userfaultfd_api(struct userfaultfd_ctx *ctx, 198386039bd3SAndrea Arcangeli unsigned long arg) 198486039bd3SAndrea Arcangeli { 198586039bd3SAndrea Arcangeli struct uffdio_api uffdio_api; 198686039bd3SAndrea Arcangeli void __user *buf = (void __user *)arg; 198722e5fe2aSNadav Amit unsigned int ctx_features; 198886039bd3SAndrea Arcangeli int ret; 198965603144SAndrea Arcangeli __u64 features; 199086039bd3SAndrea Arcangeli 199186039bd3SAndrea Arcangeli ret = -EFAULT; 1992a9b85f94SAndrea Arcangeli if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) 199386039bd3SAndrea Arcangeli goto out; 19942ff559f3SPeter Xu features = uffdio_api.features; 19952ff559f3SPeter Xu ret = -EINVAL; 19962ff559f3SPeter Xu if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) 19972ff559f3SPeter Xu goto err_out; 19983c1c24d9SMike Rapoport ret = -EPERM; 19993c1c24d9SMike Rapoport if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) 20003c1c24d9SMike Rapoport goto err_out; 200165603144SAndrea Arcangeli /* report all available features and ioctls to userland */ 200265603144SAndrea Arcangeli uffdio_api.features = UFFD_API_FEATURES; 20037677f7fdSAxel Rasmussen #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 2004964ab004SAxel Rasmussen uffdio_api.features &= 2005964ab004SAxel Rasmussen ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM); 20067677f7fdSAxel Rasmussen #endif 200700b151f2SPeter Xu #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 200800b151f2SPeter Xu uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; 200986039bd3SAndrea Arcangeli #endif 2010b1f9e876SPeter Xu #ifndef CONFIG_PTE_MARKER_UFFD_WP 2011b1f9e876SPeter Xu uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; 20122bad466cSPeter Xu uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED; 2013b1f9e876SPeter Xu #endif 201486039bd3SAndrea Arcangeli uffdio_api.ioctls = UFFD_API_IOCTLS; 201586039bd3SAndrea Arcangeli ret = -EFAULT; 201686039bd3SAndrea Arcangeli if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) 201786039bd3SAndrea Arcangeli goto out; 201822e5fe2aSNadav Amit 201986039bd3SAndrea Arcangeli /* only enable the requested features for this uffd context */ 202022e5fe2aSNadav Amit ctx_features = uffd_ctx_features(features); 202122e5fe2aSNadav Amit ret = -EINVAL; 202222e5fe2aSNadav Amit if (cmpxchg(&ctx->features, 0, ctx_features) != 0) 202322e5fe2aSNadav Amit goto err_out; 202422e5fe2aSNadav Amit 202586039bd3SAndrea Arcangeli ret = 0; 202686039bd3SAndrea Arcangeli out: 202786039bd3SAndrea Arcangeli 
return ret; 202886039bd3SAndrea Arcangeli err_out: 202986039bd3SAndrea Arcangeli memset(&uffdio_api, 0, sizeof(uffdio_api)); 203086039bd3SAndrea Arcangeli if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) 203186039bd3SAndrea Arcangeli ret = -EFAULT; 203286039bd3SAndrea Arcangeli goto out; 203386039bd3SAndrea Arcangeli } 203486039bd3SAndrea Arcangeli 203586039bd3SAndrea Arcangeli static long userfaultfd_ioctl(struct file *file, unsigned cmd, 2036e6485a47SAndrea Arcangeli unsigned long arg) 2037e6485a47SAndrea Arcangeli { 2038e6485a47SAndrea Arcangeli int ret = -EINVAL; 203986039bd3SAndrea Arcangeli struct userfaultfd_ctx *ctx = file->private_data; 204086039bd3SAndrea Arcangeli 204122e5fe2aSNadav Amit if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) 204286039bd3SAndrea Arcangeli return -EINVAL; 204386039bd3SAndrea Arcangeli 204486039bd3SAndrea Arcangeli switch(cmd) { 204586039bd3SAndrea Arcangeli case UFFDIO_API: 204686039bd3SAndrea Arcangeli ret = userfaultfd_api(ctx, arg); 204786039bd3SAndrea Arcangeli break; 204886039bd3SAndrea Arcangeli case UFFDIO_REGISTER: 204986039bd3SAndrea Arcangeli ret = userfaultfd_register(ctx, arg); 205086039bd3SAndrea Arcangeli break; 205186039bd3SAndrea Arcangeli case UFFDIO_UNREGISTER: 205286039bd3SAndrea Arcangeli ret = userfaultfd_unregister(ctx, arg); 205386039bd3SAndrea Arcangeli break; 205486039bd3SAndrea Arcangeli case UFFDIO_WAKE: 2055ad465caeSAndrea Arcangeli ret = userfaultfd_wake(ctx, arg); 2056ad465caeSAndrea Arcangeli break; 2057ad465caeSAndrea Arcangeli case UFFDIO_COPY: 2058ad465caeSAndrea Arcangeli ret = userfaultfd_copy(ctx, arg); 2059ad465caeSAndrea Arcangeli break; 2060ad465caeSAndrea Arcangeli case UFFDIO_ZEROPAGE: 206186039bd3SAndrea Arcangeli ret = userfaultfd_zeropage(ctx, arg); 206286039bd3SAndrea Arcangeli break; 206363b2d417SAndrea Arcangeli case UFFDIO_WRITEPROTECT: 206463b2d417SAndrea Arcangeli ret = userfaultfd_writeprotect(ctx, arg); 206563b2d417SAndrea Arcangeli break; 2066f6191471SAxel Rasmussen case UFFDIO_CONTINUE: 2067f6191471SAxel Rasmussen ret = userfaultfd_continue(ctx, arg); 2068f6191471SAxel Rasmussen break; 206986039bd3SAndrea Arcangeli } 207086039bd3SAndrea Arcangeli return ret; 207186039bd3SAndrea Arcangeli } 207286039bd3SAndrea Arcangeli 207386039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS 207486039bd3SAndrea Arcangeli static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) 207586039bd3SAndrea Arcangeli { 207686039bd3SAndrea Arcangeli struct userfaultfd_ctx *ctx = f->private_data; 2077ac6424b9SIngo Molnar wait_queue_entry_t *wq; 207886039bd3SAndrea Arcangeli unsigned long pending = 0, total = 0; 207986039bd3SAndrea Arcangeli 2080cbcfa130SEric Biggers spin_lock_irq(&ctx->fault_pending_wqh.lock); 20812055da97SIngo Molnar list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { 208286039bd3SAndrea Arcangeli pending++; 208386039bd3SAndrea Arcangeli total++; 208486039bd3SAndrea Arcangeli } 20852055da97SIngo Molnar list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { 208615b726efSAndrea Arcangeli total++; 208715b726efSAndrea Arcangeli } 2088cbcfa130SEric Biggers spin_unlock_irq(&ctx->fault_pending_wqh.lock); 208986039bd3SAndrea Arcangeli 209086039bd3SAndrea Arcangeli /* 209186039bd3SAndrea Arcangeli * If more protocols will be added, there will be all shown 209286039bd3SAndrea Arcangeli * separated by a space. Like this: 209386039bd3SAndrea Arcangeli * protocols: aa:... bb:... 
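	 *
	 * Example output with illustrative values ("aa" is UFFD_API;
	 * the last two fields are the features and ioctl bitmasks in
	 * hex):
	 *
	 *	pending:	2
	 *	total:	5
	 *	API:	aa:0:3c0f3fe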
209486039bd3SAndrea Arcangeli */ 209586039bd3SAndrea Arcangeli seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", 2096045098e9SMike Rapoport pending, total, UFFD_API, ctx->features, 209786039bd3SAndrea Arcangeli UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); 209886039bd3SAndrea Arcangeli } 209986039bd3SAndrea Arcangeli #endif 210086039bd3SAndrea Arcangeli 210186039bd3SAndrea Arcangeli static const struct file_operations userfaultfd_fops = { 210286039bd3SAndrea Arcangeli #ifdef CONFIG_PROC_FS 210386039bd3SAndrea Arcangeli .show_fdinfo = userfaultfd_show_fdinfo, 210486039bd3SAndrea Arcangeli #endif 210586039bd3SAndrea Arcangeli .release = userfaultfd_release, 210686039bd3SAndrea Arcangeli .poll = userfaultfd_poll, 210786039bd3SAndrea Arcangeli .read = userfaultfd_read, 210886039bd3SAndrea Arcangeli .unlocked_ioctl = userfaultfd_ioctl, 21091832f2d8SArnd Bergmann .compat_ioctl = compat_ptr_ioctl, 211086039bd3SAndrea Arcangeli .llseek = noop_llseek, 211186039bd3SAndrea Arcangeli }; 211286039bd3SAndrea Arcangeli 21133004ec9cSAndrea Arcangeli static void init_once_userfaultfd_ctx(void *mem) 21143004ec9cSAndrea Arcangeli { 21153004ec9cSAndrea Arcangeli struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; 21163004ec9cSAndrea Arcangeli 21173004ec9cSAndrea Arcangeli init_waitqueue_head(&ctx->fault_pending_wqh); 21183004ec9cSAndrea Arcangeli init_waitqueue_head(&ctx->fault_wqh); 21199cd75c3cSPavel Emelyanov init_waitqueue_head(&ctx->event_wqh); 21203004ec9cSAndrea Arcangeli init_waitqueue_head(&ctx->fd_wqh); 21212ca97ac8SAhmed S. Darwish seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); 21223004ec9cSAndrea Arcangeli } 21233004ec9cSAndrea Arcangeli 21242d5de004SAxel Rasmussen static int new_userfaultfd(int flags) 212586039bd3SAndrea Arcangeli { 212686039bd3SAndrea Arcangeli struct userfaultfd_ctx *ctx; 2127284cd241SEric Biggers int fd; 212886039bd3SAndrea Arcangeli 212986039bd3SAndrea Arcangeli BUG_ON(!current->mm); 213086039bd3SAndrea Arcangeli 213186039bd3SAndrea Arcangeli /* Check the UFFD_* constants for consistency. 
*/ 213237cd0575SLokesh Gidra BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS); 213386039bd3SAndrea Arcangeli BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); 213486039bd3SAndrea Arcangeli BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); 213586039bd3SAndrea Arcangeli 213637cd0575SLokesh Gidra if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY)) 2137284cd241SEric Biggers return -EINVAL; 213886039bd3SAndrea Arcangeli 21393004ec9cSAndrea Arcangeli ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); 214086039bd3SAndrea Arcangeli if (!ctx) 2141284cd241SEric Biggers return -ENOMEM; 214286039bd3SAndrea Arcangeli 2143ca880420SEric Biggers refcount_set(&ctx->refcount, 1); 214486039bd3SAndrea Arcangeli ctx->flags = flags; 21459cd75c3cSPavel Emelyanov ctx->features = 0; 214686039bd3SAndrea Arcangeli ctx->released = false; 2147a759a909SNadav Amit atomic_set(&ctx->mmap_changing, 0); 214886039bd3SAndrea Arcangeli ctx->mm = current->mm; 214986039bd3SAndrea Arcangeli /* prevent the mm struct to be freed */ 2150f1f10076SVegard Nossum mmgrab(ctx->mm); 215186039bd3SAndrea Arcangeli 2152b537900fSDaniel Colascione fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, 2153abec3d01SOndrej Mosnacek O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); 2154284cd241SEric Biggers if (fd < 0) { 2155d2005e3fSOleg Nesterov mmdrop(ctx->mm); 21563004ec9cSAndrea Arcangeli kmem_cache_free(userfaultfd_ctx_cachep, ctx); 2157c03e946fSEric Biggers } 215886039bd3SAndrea Arcangeli return fd; 215986039bd3SAndrea Arcangeli } 21603004ec9cSAndrea Arcangeli 21612d5de004SAxel Rasmussen static inline bool userfaultfd_syscall_allowed(int flags) 21622d5de004SAxel Rasmussen { 21632d5de004SAxel Rasmussen /* Userspace-only page faults are always allowed */ 21642d5de004SAxel Rasmussen if (flags & UFFD_USER_MODE_ONLY) 21652d5de004SAxel Rasmussen return true; 21662d5de004SAxel Rasmussen 21672d5de004SAxel Rasmussen /* 21682d5de004SAxel Rasmussen * The user is requesting a userfaultfd which can handle kernel faults. 21692d5de004SAxel Rasmussen * Privileged users are always allowed to do this. 21702d5de004SAxel Rasmussen */ 21712d5de004SAxel Rasmussen if (capable(CAP_SYS_PTRACE)) 21722d5de004SAxel Rasmussen return true; 21732d5de004SAxel Rasmussen 21742d5de004SAxel Rasmussen /* Otherwise, access to kernel fault handling is sysctl controlled. 
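	 *
	 * That is, with the vm.unprivileged_userfaultfd sysctl set to
	 * 0, an unprivileged caller can still create a context limited
	 * to userspace faults (illustrative sketch):
	 *
	 *	int uffd = syscall(__NR_userfaultfd,
	 *			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);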
	 */
21752d5de004SAxel Rasmussen 	return sysctl_unprivileged_userfaultfd;
21762d5de004SAxel Rasmussen }
21772d5de004SAxel Rasmussen 
21782d5de004SAxel Rasmussen SYSCALL_DEFINE1(userfaultfd, int, flags)
21792d5de004SAxel Rasmussen {
21802d5de004SAxel Rasmussen 	if (!userfaultfd_syscall_allowed(flags))
21812d5de004SAxel Rasmussen 		return -EPERM;
21822d5de004SAxel Rasmussen 
21832d5de004SAxel Rasmussen 	return new_userfaultfd(flags);
21842d5de004SAxel Rasmussen }
21852d5de004SAxel Rasmussen 
21862d5de004SAxel Rasmussen static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
21872d5de004SAxel Rasmussen {
21882d5de004SAxel Rasmussen 	if (cmd != USERFAULTFD_IOC_NEW)
21892d5de004SAxel Rasmussen 		return -EINVAL;
21902d5de004SAxel Rasmussen 
21912d5de004SAxel Rasmussen 	return new_userfaultfd(flags);
21922d5de004SAxel Rasmussen }
21932d5de004SAxel Rasmussen 
21942d5de004SAxel Rasmussen static const struct file_operations userfaultfd_dev_fops = {
21952d5de004SAxel Rasmussen 	.unlocked_ioctl = userfaultfd_dev_ioctl,
21962d5de004SAxel Rasmussen 	.compat_ioctl = userfaultfd_dev_ioctl,
21972d5de004SAxel Rasmussen 	.owner = THIS_MODULE,
21982d5de004SAxel Rasmussen 	.llseek = noop_llseek,
21992d5de004SAxel Rasmussen };
22002d5de004SAxel Rasmussen 
22012d5de004SAxel Rasmussen static struct miscdevice userfaultfd_misc = {
22022d5de004SAxel Rasmussen 	.minor = MISC_DYNAMIC_MINOR,
22032d5de004SAxel Rasmussen 	.name = "userfaultfd",
22042d5de004SAxel Rasmussen 	.fops = &userfaultfd_dev_fops
22052d5de004SAxel Rasmussen };
22062d5de004SAxel Rasmussen 
22073004ec9cSAndrea Arcangeli static int __init userfaultfd_init(void)
22083004ec9cSAndrea Arcangeli {
22092d5de004SAxel Rasmussen 	int ret;
22102d5de004SAxel Rasmussen 
22112d5de004SAxel Rasmussen 	ret = misc_register(&userfaultfd_misc);
22122d5de004SAxel Rasmussen 	if (ret)
22132d5de004SAxel Rasmussen 		return ret;
22142d5de004SAxel Rasmussen 
22153004ec9cSAndrea Arcangeli 	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
22163004ec9cSAndrea Arcangeli 						sizeof(struct userfaultfd_ctx),
22173004ec9cSAndrea Arcangeli 						0,
22183004ec9cSAndrea Arcangeli 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
22193004ec9cSAndrea Arcangeli 						init_once_userfaultfd_ctx);
22202d337b71SZhangPeng #ifdef CONFIG_SYSCTL
22212d337b71SZhangPeng 	register_sysctl_init("vm", vm_userfaultfd_table);
22222d337b71SZhangPeng #endif
22233004ec9cSAndrea Arcangeli 	return 0;
22243004ec9cSAndrea Arcangeli }
22253004ec9cSAndrea Arcangeli __initcall(userfaultfd_init);
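/*
 * End-to-end userspace sketch (illustrative only, not part of the
 * kernel build): create a userfaultfd, complete the UFFD_API
 * handshake, register an anonymous region for missing faults and
 * resolve the first fault with UFFDIO_COPY. Error handling is
 * elided, 4KiB pages are assumed, and a real monitor would run the
 * poll/read loop in a dedicated thread while other threads touch
 * the registered area.
 *
 *	#include <fcntl.h>
 *	#include <linux/userfaultfd.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned long page = 4096;
 *		int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *
 *		// Alternative when the syscall is restricted:
 *		//	int dev = open("/dev/userfaultfd", O_RDWR);
 *		//	uffd = ioctl(dev, USERFAULTFD_IOC_NEW,
 *		//		     O_CLOEXEC | O_NONBLOCK);
 *
 *		struct uffdio_api api = { .api = UFFD_API };
 *		ioctl(uffd, UFFDIO_API, &api);
 *
 *		char *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		struct uffdio_register reg = {
 *			.range = { .start = (unsigned long)area, .len = page },
 *			.mode = UFFDIO_REGISTER_MODE_MISSING,
 *		};
 *		ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *		static char fill[4096];
 *		struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *		struct uffd_msg msg;
 *
 *		memset(fill, 0xaa, sizeof(fill));
 *		poll(&pfd, 1, -1);
 *		if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
 *		    msg.event == UFFD_EVENT_PAGEFAULT) {
 *			struct uffdio_copy c = {
 *				.dst = msg.arg.pagefault.address & ~(page - 1),
 *				.src = (unsigned long)fill,
 *				.len = page,
 *			};
 *			ioctl(uffd, UFFDIO_COPY, &c);
 *		}
 *		return 0;
 *	}
 */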