1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * fs/userfaultfd.c 4 * 5 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> 6 * Copyright (C) 2008-2009 Red Hat, Inc. 7 * Copyright (C) 2015 Red Hat, Inc. 8 * 9 * Some part derived from fs/eventfd.c (anon inode setup) and 10 * mm/ksm.c (mm hashing). 11 */ 12 13 #include <linux/list.h> 14 #include <linux/hashtable.h> 15 #include <linux/sched/signal.h> 16 #include <linux/sched/mm.h> 17 #include <linux/mm.h> 18 #include <linux/mm_inline.h> 19 #include <linux/mmu_notifier.h> 20 #include <linux/poll.h> 21 #include <linux/slab.h> 22 #include <linux/seq_file.h> 23 #include <linux/file.h> 24 #include <linux/bug.h> 25 #include <linux/anon_inodes.h> 26 #include <linux/syscalls.h> 27 #include <linux/userfaultfd_k.h> 28 #include <linux/mempolicy.h> 29 #include <linux/ioctl.h> 30 #include <linux/security.h> 31 #include <linux/hugetlb.h> 32 #include <linux/swapops.h> 33 #include <linux/miscdevice.h> 34 35 int sysctl_unprivileged_userfaultfd __read_mostly; 36 37 static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly; 38 39 /* 40 * Start with fault_pending_wqh and fault_wqh so they're more likely 41 * to be in the same cacheline. 42 * 43 * Locking order: 44 * fd_wqh.lock 45 * fault_pending_wqh.lock 46 * fault_wqh.lock 47 * event_wqh.lock 48 * 49 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks, 50 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's 51 * also taken in IRQ context. 52 */ 53 struct userfaultfd_ctx { 54 /* waitqueue head for the pending (i.e. not read) userfaults */ 55 wait_queue_head_t fault_pending_wqh; 56 /* waitqueue head for the userfaults */ 57 wait_queue_head_t fault_wqh; 58 /* waitqueue head for the pseudo fd to wakeup poll/read */ 59 wait_queue_head_t fd_wqh; 60 /* waitqueue head for events */ 61 wait_queue_head_t event_wqh; 62 /* a refile sequence protected by fault_pending_wqh lock */ 63 seqcount_spinlock_t refile_seq; 64 /* pseudo fd refcounting */ 65 refcount_t refcount; 66 /* userfaultfd syscall flags */ 67 unsigned int flags; 68 /* features requested from the userspace */ 69 unsigned int features; 70 /* released */ 71 bool released; 72 /* memory mappings are changing because of non-cooperative event */ 73 atomic_t mmap_changing; 74 /* mm with one ore more vmas attached to this userfaultfd_ctx */ 75 struct mm_struct *mm; 76 }; 77 78 struct userfaultfd_fork_ctx { 79 struct userfaultfd_ctx *orig; 80 struct userfaultfd_ctx *new; 81 struct list_head list; 82 }; 83 84 struct userfaultfd_unmap_ctx { 85 struct userfaultfd_ctx *ctx; 86 unsigned long start; 87 unsigned long end; 88 struct list_head list; 89 }; 90 91 struct userfaultfd_wait_queue { 92 struct uffd_msg msg; 93 wait_queue_entry_t wq; 94 struct userfaultfd_ctx *ctx; 95 bool waken; 96 }; 97 98 struct userfaultfd_wake_range { 99 unsigned long start; 100 unsigned long len; 101 }; 102 103 /* internal indication that UFFD_API ioctl was successfully executed */ 104 #define UFFD_FEATURE_INITIALIZED (1u << 31) 105 106 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) 107 { 108 return ctx->features & UFFD_FEATURE_INITIALIZED; 109 } 110 111 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, 112 int wake_flags, void *key) 113 { 114 struct userfaultfd_wake_range *range = key; 115 int ret; 116 struct userfaultfd_wait_queue *uwq; 117 unsigned long start, len; 118 119 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 120 ret = 0; 121 /* len == 0 means wake all */ 122 
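/*
 * Worked example (illustrative only): with range->start == 0x1000 and
 * range->len == 0x2000, only waiters whose faulting address falls in
 * [0x1000, 0x3000) pass the check below and are woken; len == 0 skips
 * the check entirely and wakes every waiter on the queue.
 */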
start = range->start; 123 len = range->len; 124 if (len && (start > uwq->msg.arg.pagefault.address || 125 start + len <= uwq->msg.arg.pagefault.address)) 126 goto out; 127 WRITE_ONCE(uwq->waken, true); 128 /* 129 * The Program-Order guarantees provided by the scheduler 130 * ensure uwq->waken is visible before the task is woken. 131 */ 132 ret = wake_up_state(wq->private, mode); 133 if (ret) { 134 /* 135 * Wake only once, autoremove behavior. 136 * 137 * After the effect of list_del_init is visible to the other 138 * CPUs, the waitqueue may disappear from under us, see the 139 * !list_empty_careful() in handle_userfault(). 140 * 141 * try_to_wake_up() has an implicit smp_mb(), and the 142 * wq->private is read before calling the extern function 143 * "wake_up_state" (which in turns calls try_to_wake_up). 144 */ 145 list_del_init(&wq->entry); 146 } 147 out: 148 return ret; 149 } 150 151 /** 152 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd 153 * context. 154 * @ctx: [in] Pointer to the userfaultfd context. 155 */ 156 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) 157 { 158 refcount_inc(&ctx->refcount); 159 } 160 161 /** 162 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd 163 * context. 164 * @ctx: [in] Pointer to userfaultfd context. 165 * 166 * The userfaultfd context reference must have been previously acquired either 167 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget(). 168 */ 169 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) 170 { 171 if (refcount_dec_and_test(&ctx->refcount)) { 172 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); 173 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); 174 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); 175 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); 176 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock)); 177 VM_BUG_ON(waitqueue_active(&ctx->event_wqh)); 178 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); 179 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); 180 mmdrop(ctx->mm); 181 kmem_cache_free(userfaultfd_ctx_cachep, ctx); 182 } 183 } 184 185 static inline void msg_init(struct uffd_msg *msg) 186 { 187 BUILD_BUG_ON(sizeof(struct uffd_msg) != 32); 188 /* 189 * Must use memset to zero out the paddings or kernel data is 190 * leaked to userland. 191 */ 192 memset(msg, 0, sizeof(struct uffd_msg)); 193 } 194 195 static inline struct uffd_msg userfault_msg(unsigned long address, 196 unsigned long real_address, 197 unsigned int flags, 198 unsigned long reason, 199 unsigned int features) 200 { 201 struct uffd_msg msg; 202 203 msg_init(&msg); 204 msg.event = UFFD_EVENT_PAGEFAULT; 205 206 msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ? 207 real_address : address; 208 209 /* 210 * These flags indicate why the userfault occurred: 211 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault. 212 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault. 213 * - Neither of these flags being set indicates a MISSING fault. 214 * 215 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write 216 * fault. Otherwise, it was a read fault. 
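 *
 * Purely as a hedged userspace-side sketch (not part of this file's
 * logic): a monitor that has read a struct uffd_msg could classify
 * the fault along the lines of
 *
 *	if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)
 *		handle_wp(msg.arg.pagefault.address);
 *	else if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR)
 *		handle_minor(msg.arg.pagefault.address);
 *	else
 *		handle_missing(msg.arg.pagefault.address);
 *
 * where handle_wp(), handle_minor() and handle_missing() are
 * hypothetical helpers, and UFFD_PAGEFAULT_FLAG_WRITE can be tested
 * independently to tell write faults from read faults.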
217 */ 218 if (flags & FAULT_FLAG_WRITE) 219 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE; 220 if (reason & VM_UFFD_WP) 221 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP; 222 if (reason & VM_UFFD_MINOR) 223 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR; 224 if (features & UFFD_FEATURE_THREAD_ID) 225 msg.arg.pagefault.feat.ptid = task_pid_vnr(current); 226 return msg; 227 } 228 229 #ifdef CONFIG_HUGETLB_PAGE 230 /* 231 * Same functionality as userfaultfd_must_wait below with modifications for 232 * hugepmd ranges. 233 */ 234 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, 235 struct vm_area_struct *vma, 236 unsigned long address, 237 unsigned long flags, 238 unsigned long reason) 239 { 240 struct mm_struct *mm = ctx->mm; 241 pte_t *ptep, pte; 242 bool ret = true; 243 244 mmap_assert_locked(mm); 245 246 ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); 247 248 if (!ptep) 249 goto out; 250 251 ret = false; 252 pte = huge_ptep_get(ptep); 253 254 /* 255 * Lockless access: we're in a wait_event so it's ok if it 256 * changes under us. PTE markers should be handled the same as none 257 * ptes here. 258 */ 259 if (huge_pte_none_mostly(pte)) 260 ret = true; 261 if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) 262 ret = true; 263 out: 264 return ret; 265 } 266 #else 267 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, 268 struct vm_area_struct *vma, 269 unsigned long address, 270 unsigned long flags, 271 unsigned long reason) 272 { 273 return false; /* should never get here */ 274 } 275 #endif /* CONFIG_HUGETLB_PAGE */ 276 277 /* 278 * Verify the pagetables are still not ok after having reigstered into 279 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any 280 * userfault that has already been resolved, if userfaultfd_read and 281 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different 282 * threads. 283 */ 284 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, 285 unsigned long address, 286 unsigned long flags, 287 unsigned long reason) 288 { 289 struct mm_struct *mm = ctx->mm; 290 pgd_t *pgd; 291 p4d_t *p4d; 292 pud_t *pud; 293 pmd_t *pmd, _pmd; 294 pte_t *pte; 295 bool ret = true; 296 297 mmap_assert_locked(mm); 298 299 pgd = pgd_offset(mm, address); 300 if (!pgd_present(*pgd)) 301 goto out; 302 p4d = p4d_offset(pgd, address); 303 if (!p4d_present(*p4d)) 304 goto out; 305 pud = pud_offset(p4d, address); 306 if (!pud_present(*pud)) 307 goto out; 308 pmd = pmd_offset(pud, address); 309 /* 310 * READ_ONCE must function as a barrier with narrower scope 311 * and it must be equivalent to: 312 * _pmd = *pmd; barrier(); 313 * 314 * This is to deal with the instability (as in 315 * pmd_trans_unstable) of the pmd. 316 */ 317 _pmd = READ_ONCE(*pmd); 318 if (pmd_none(_pmd)) 319 goto out; 320 321 ret = false; 322 if (!pmd_present(_pmd)) 323 goto out; 324 325 if (pmd_trans_huge(_pmd)) { 326 if (!pmd_write(_pmd) && (reason & VM_UFFD_WP)) 327 ret = true; 328 goto out; 329 } 330 331 /* 332 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it 333 * and use the standard pte_offset_map() instead of parsing _pmd. 334 */ 335 pte = pte_offset_map(pmd, address); 336 /* 337 * Lockless access: we're in a wait_event so it's ok if it 338 * changes under us. PTE markers should be handled the same as none 339 * ptes here. 
340 */ 341 if (pte_none_mostly(*pte)) 342 ret = true; 343 if (!pte_write(*pte) && (reason & VM_UFFD_WP)) 344 ret = true; 345 pte_unmap(pte); 346 347 out: 348 return ret; 349 } 350 351 static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags) 352 { 353 if (flags & FAULT_FLAG_INTERRUPTIBLE) 354 return TASK_INTERRUPTIBLE; 355 356 if (flags & FAULT_FLAG_KILLABLE) 357 return TASK_KILLABLE; 358 359 return TASK_UNINTERRUPTIBLE; 360 } 361 362 /* 363 * The locking rules involved in returning VM_FAULT_RETRY depending on 364 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and 365 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution" 366 * recommendation in __lock_page_or_retry is not an understatement. 367 * 368 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released 369 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is 370 * not set. 371 * 372 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not 373 * set, VM_FAULT_RETRY can still be returned if and only if there are 374 * fatal_signal_pending()s, and the mmap_lock must be released before 375 * returning it. 376 */ 377 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) 378 { 379 struct mm_struct *mm = vmf->vma->vm_mm; 380 struct userfaultfd_ctx *ctx; 381 struct userfaultfd_wait_queue uwq; 382 vm_fault_t ret = VM_FAULT_SIGBUS; 383 bool must_wait; 384 unsigned int blocking_state; 385 386 /* 387 * We don't do userfault handling for the final child pid update. 388 * 389 * We also don't do userfault handling during 390 * coredumping. hugetlbfs has the special 391 * follow_hugetlb_page() to skip missing pages in the 392 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with 393 * the no_page_table() helper in follow_page_mask(), but the 394 * shmem_vm_ops->fault method is invoked even during 395 * coredumping without mmap_lock and it ends up here. 396 */ 397 if (current->flags & (PF_EXITING|PF_DUMPCORE)) 398 goto out; 399 400 /* 401 * Coredumping runs without mmap_lock so we can only check that 402 * the mmap_lock is held, if PF_DUMPCORE was not set. 403 */ 404 mmap_assert_locked(mm); 405 406 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; 407 if (!ctx) 408 goto out; 409 410 BUG_ON(ctx->mm != mm); 411 412 /* Any unrecognized flag is a bug. */ 413 VM_BUG_ON(reason & ~__VM_UFFD_FLAGS); 414 /* 0 or > 1 flags set is a bug; we expect exactly 1. */ 415 VM_BUG_ON(!reason || (reason & (reason - 1))); 416 417 if (ctx->features & UFFD_FEATURE_SIGBUS) 418 goto out; 419 if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY)) 420 goto out; 421 422 /* 423 * If it's already released don't get it. This avoids to loop 424 * in __get_user_pages if userfaultfd_release waits on the 425 * caller of handle_userfault to release the mmap_lock. 426 */ 427 if (unlikely(READ_ONCE(ctx->released))) { 428 /* 429 * Don't return VM_FAULT_SIGBUS in this case, so a non 430 * cooperative manager can close the uffd after the 431 * last UFFDIO_COPY, without risking to trigger an 432 * involuntary SIGBUS if the process was starting the 433 * userfaultfd while the userfaultfd was still armed 434 * (but after the last UFFDIO_COPY). If the uffd 435 * wasn't already closed when the userfault reached 436 * this point, that would normally be solved by 437 * userfaultfd_must_wait returning 'false'. 438 * 439 * If we were to return VM_FAULT_SIGBUS here, the non 440 * cooperative manager would be instead forced to 441 * always call UFFDIO_UNREGISTER before it can safely 442 * close the uffd. 
443 */ 444 ret = VM_FAULT_NOPAGE; 445 goto out; 446 } 447 448 /* 449 * Check that we can return VM_FAULT_RETRY. 450 * 451 * NOTE: it should become possible to return VM_FAULT_RETRY 452 * even if FAULT_FLAG_TRIED is set without leading to gup() 453 * -EBUSY failures, if the userfaultfd is to be extended for 454 * VM_UFFD_WP tracking and we intend to arm the userfault 455 * without first stopping userland access to the memory. For 456 * VM_UFFD_MISSING userfaults this is enough for now. 457 */ 458 if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) { 459 /* 460 * Validate the invariant that nowait must allow retry 461 * to be sure not to return SIGBUS erroneously on 462 * nowait invocations. 463 */ 464 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT); 465 #ifdef CONFIG_DEBUG_VM 466 if (printk_ratelimit()) { 467 printk(KERN_WARNING 468 "FAULT_FLAG_ALLOW_RETRY missing %x\n", 469 vmf->flags); 470 dump_stack(); 471 } 472 #endif 473 goto out; 474 } 475 476 /* 477 * Handle nowait, not much to do other than tell it to retry 478 * and wait. 479 */ 480 ret = VM_FAULT_RETRY; 481 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 482 goto out; 483 484 /* take the reference before dropping the mmap_lock */ 485 userfaultfd_ctx_get(ctx); 486 487 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); 488 uwq.wq.private = current; 489 uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags, 490 reason, ctx->features); 491 uwq.ctx = ctx; 492 uwq.waken = false; 493 494 blocking_state = userfaultfd_get_blocking_state(vmf->flags); 495 496 spin_lock_irq(&ctx->fault_pending_wqh.lock); 497 /* 498 * After the __add_wait_queue the uwq is visible to userland 499 * through poll/read(). 500 */ 501 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); 502 /* 503 * The smp_mb() after __set_current_state prevents the reads 504 * following the spin_unlock to happen before the list_add in 505 * __add_wait_queue. 506 */ 507 set_current_state(blocking_state); 508 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 509 510 if (!is_vm_hugetlb_page(vmf->vma)) 511 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, 512 reason); 513 else 514 must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, 515 vmf->address, 516 vmf->flags, reason); 517 mmap_read_unlock(mm); 518 519 if (likely(must_wait && !READ_ONCE(ctx->released))) { 520 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 521 schedule(); 522 } 523 524 __set_current_state(TASK_RUNNING); 525 526 /* 527 * Here we race with the list_del; list_add in 528 * userfaultfd_ctx_read(), however because we don't ever run 529 * list_del_init() to refile across the two lists, the prev 530 * and next pointers will never point to self. list_add also 531 * would never let any of the two pointers to point to 532 * self. So list_empty_careful won't risk to see both pointers 533 * pointing to self at any time during the list refile. The 534 * only case where list_del_init() is called is the full 535 * removal in the wake function and there we don't re-list_add 536 * and it's fine not to block on the spinlock. The uwq on this 537 * kernel stack can be released after the list_del_init. 538 */ 539 if (!list_empty_careful(&uwq.wq.entry)) { 540 spin_lock_irq(&ctx->fault_pending_wqh.lock); 541 /* 542 * No need of list_del_init(), the uwq on the stack 543 * will be freed shortly anyway. 544 */ 545 list_del(&uwq.wq.entry); 546 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 547 } 548 549 /* 550 * ctx may go away after this if the userfault pseudo fd is 551 * already released. 
552 */ 553 userfaultfd_ctx_put(ctx); 554 555 out: 556 return ret; 557 } 558 559 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, 560 struct userfaultfd_wait_queue *ewq) 561 { 562 struct userfaultfd_ctx *release_new_ctx; 563 564 if (WARN_ON_ONCE(current->flags & PF_EXITING)) 565 goto out; 566 567 ewq->ctx = ctx; 568 init_waitqueue_entry(&ewq->wq, current); 569 release_new_ctx = NULL; 570 571 spin_lock_irq(&ctx->event_wqh.lock); 572 /* 573 * After the __add_wait_queue the uwq is visible to userland 574 * through poll/read(). 575 */ 576 __add_wait_queue(&ctx->event_wqh, &ewq->wq); 577 for (;;) { 578 set_current_state(TASK_KILLABLE); 579 if (ewq->msg.event == 0) 580 break; 581 if (READ_ONCE(ctx->released) || 582 fatal_signal_pending(current)) { 583 /* 584 * &ewq->wq may be queued in fork_event, but 585 * __remove_wait_queue ignores the head 586 * parameter. It would be a problem if it 587 * didn't. 588 */ 589 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 590 if (ewq->msg.event == UFFD_EVENT_FORK) { 591 struct userfaultfd_ctx *new; 592 593 new = (struct userfaultfd_ctx *) 594 (unsigned long) 595 ewq->msg.arg.reserved.reserved1; 596 release_new_ctx = new; 597 } 598 break; 599 } 600 601 spin_unlock_irq(&ctx->event_wqh.lock); 602 603 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 604 schedule(); 605 606 spin_lock_irq(&ctx->event_wqh.lock); 607 } 608 __set_current_state(TASK_RUNNING); 609 spin_unlock_irq(&ctx->event_wqh.lock); 610 611 if (release_new_ctx) { 612 struct vm_area_struct *vma; 613 struct mm_struct *mm = release_new_ctx->mm; 614 615 /* the various vma->vm_userfaultfd_ctx still points to it */ 616 mmap_write_lock(mm); 617 for (vma = mm->mmap; vma; vma = vma->vm_next) 618 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { 619 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 620 vma->vm_flags &= ~__VM_UFFD_FLAGS; 621 } 622 mmap_write_unlock(mm); 623 624 userfaultfd_ctx_put(release_new_ctx); 625 } 626 627 /* 628 * ctx may go away after this if the userfault pseudo fd is 629 * already released. 
630 */ 631 out: 632 atomic_dec(&ctx->mmap_changing); 633 VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0); 634 userfaultfd_ctx_put(ctx); 635 } 636 637 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, 638 struct userfaultfd_wait_queue *ewq) 639 { 640 ewq->msg.event = 0; 641 wake_up_locked(&ctx->event_wqh); 642 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 643 } 644 645 int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) 646 { 647 struct userfaultfd_ctx *ctx = NULL, *octx; 648 struct userfaultfd_fork_ctx *fctx; 649 650 octx = vma->vm_userfaultfd_ctx.ctx; 651 if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { 652 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 653 vma->vm_flags &= ~__VM_UFFD_FLAGS; 654 return 0; 655 } 656 657 list_for_each_entry(fctx, fcs, list) 658 if (fctx->orig == octx) { 659 ctx = fctx->new; 660 break; 661 } 662 663 if (!ctx) { 664 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL); 665 if (!fctx) 666 return -ENOMEM; 667 668 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); 669 if (!ctx) { 670 kfree(fctx); 671 return -ENOMEM; 672 } 673 674 refcount_set(&ctx->refcount, 1); 675 ctx->flags = octx->flags; 676 ctx->features = octx->features; 677 ctx->released = false; 678 atomic_set(&ctx->mmap_changing, 0); 679 ctx->mm = vma->vm_mm; 680 mmgrab(ctx->mm); 681 682 userfaultfd_ctx_get(octx); 683 atomic_inc(&octx->mmap_changing); 684 fctx->orig = octx; 685 fctx->new = ctx; 686 list_add_tail(&fctx->list, fcs); 687 } 688 689 vma->vm_userfaultfd_ctx.ctx = ctx; 690 return 0; 691 } 692 693 static void dup_fctx(struct userfaultfd_fork_ctx *fctx) 694 { 695 struct userfaultfd_ctx *ctx = fctx->orig; 696 struct userfaultfd_wait_queue ewq; 697 698 msg_init(&ewq.msg); 699 700 ewq.msg.event = UFFD_EVENT_FORK; 701 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; 702 703 userfaultfd_event_wait_completion(ctx, &ewq); 704 } 705 706 void dup_userfaultfd_complete(struct list_head *fcs) 707 { 708 struct userfaultfd_fork_ctx *fctx, *n; 709 710 list_for_each_entry_safe(fctx, n, fcs, list) { 711 dup_fctx(fctx); 712 list_del(&fctx->list); 713 kfree(fctx); 714 } 715 } 716 717 void mremap_userfaultfd_prep(struct vm_area_struct *vma, 718 struct vm_userfaultfd_ctx *vm_ctx) 719 { 720 struct userfaultfd_ctx *ctx; 721 722 ctx = vma->vm_userfaultfd_ctx.ctx; 723 724 if (!ctx) 725 return; 726 727 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { 728 vm_ctx->ctx = ctx; 729 userfaultfd_ctx_get(ctx); 730 atomic_inc(&ctx->mmap_changing); 731 } else { 732 /* Drop uffd context if remap feature not enabled */ 733 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 734 vma->vm_flags &= ~__VM_UFFD_FLAGS; 735 } 736 } 737 738 void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, 739 unsigned long from, unsigned long to, 740 unsigned long len) 741 { 742 struct userfaultfd_ctx *ctx = vm_ctx->ctx; 743 struct userfaultfd_wait_queue ewq; 744 745 if (!ctx) 746 return; 747 748 if (to & ~PAGE_MASK) { 749 userfaultfd_ctx_put(ctx); 750 return; 751 } 752 753 msg_init(&ewq.msg); 754 755 ewq.msg.event = UFFD_EVENT_REMAP; 756 ewq.msg.arg.remap.from = from; 757 ewq.msg.arg.remap.to = to; 758 ewq.msg.arg.remap.len = len; 759 760 userfaultfd_event_wait_completion(ctx, &ewq); 761 } 762 763 bool userfaultfd_remove(struct vm_area_struct *vma, 764 unsigned long start, unsigned long end) 765 { 766 struct mm_struct *mm = vma->vm_mm; 767 struct userfaultfd_ctx *ctx; 768 struct userfaultfd_wait_queue ewq; 769 770 ctx = vma->vm_userfaultfd_ctx.ctx; 771 if (!ctx || !(ctx->features & 
UFFD_FEATURE_EVENT_REMOVE)) 772 return true; 773 774 userfaultfd_ctx_get(ctx); 775 atomic_inc(&ctx->mmap_changing); 776 mmap_read_unlock(mm); 777 778 msg_init(&ewq.msg); 779 780 ewq.msg.event = UFFD_EVENT_REMOVE; 781 ewq.msg.arg.remove.start = start; 782 ewq.msg.arg.remove.end = end; 783 784 userfaultfd_event_wait_completion(ctx, &ewq); 785 786 return false; 787 } 788 789 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, 790 unsigned long start, unsigned long end) 791 { 792 struct userfaultfd_unmap_ctx *unmap_ctx; 793 794 list_for_each_entry(unmap_ctx, unmaps, list) 795 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && 796 unmap_ctx->end == end) 797 return true; 798 799 return false; 800 } 801 802 int userfaultfd_unmap_prep(struct vm_area_struct *vma, 803 unsigned long start, unsigned long end, 804 struct list_head *unmaps) 805 { 806 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { 807 struct userfaultfd_unmap_ctx *unmap_ctx; 808 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; 809 810 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || 811 has_unmap_ctx(ctx, unmaps, start, end)) 812 continue; 813 814 unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); 815 if (!unmap_ctx) 816 return -ENOMEM; 817 818 userfaultfd_ctx_get(ctx); 819 atomic_inc(&ctx->mmap_changing); 820 unmap_ctx->ctx = ctx; 821 unmap_ctx->start = start; 822 unmap_ctx->end = end; 823 list_add_tail(&unmap_ctx->list, unmaps); 824 } 825 826 return 0; 827 } 828 829 void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) 830 { 831 struct userfaultfd_unmap_ctx *ctx, *n; 832 struct userfaultfd_wait_queue ewq; 833 834 list_for_each_entry_safe(ctx, n, uf, list) { 835 msg_init(&ewq.msg); 836 837 ewq.msg.event = UFFD_EVENT_UNMAP; 838 ewq.msg.arg.remove.start = ctx->start; 839 ewq.msg.arg.remove.end = ctx->end; 840 841 userfaultfd_event_wait_completion(ctx->ctx, &ewq); 842 843 list_del(&ctx->list); 844 kfree(ctx); 845 } 846 } 847 848 static int userfaultfd_release(struct inode *inode, struct file *file) 849 { 850 struct userfaultfd_ctx *ctx = file->private_data; 851 struct mm_struct *mm = ctx->mm; 852 struct vm_area_struct *vma, *prev; 853 /* len == 0 means wake all */ 854 struct userfaultfd_wake_range range = { .len = 0, }; 855 unsigned long new_flags; 856 857 WRITE_ONCE(ctx->released, true); 858 859 if (!mmget_not_zero(mm)) 860 goto wakeup; 861 862 /* 863 * Flush page faults out of all CPUs. NOTE: all page faults 864 * must be retried without returning VM_FAULT_SIGBUS if 865 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx 866 * changes while handle_userfault released the mmap_lock. So 867 * it's critical that released is set to true (above), before 868 * taking the mmap_lock for writing. 
869 */ 870 mmap_write_lock(mm); 871 prev = NULL; 872 for (vma = mm->mmap; vma; vma = vma->vm_next) { 873 cond_resched(); 874 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ 875 !!(vma->vm_flags & __VM_UFFD_FLAGS)); 876 if (vma->vm_userfaultfd_ctx.ctx != ctx) { 877 prev = vma; 878 continue; 879 } 880 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; 881 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, 882 new_flags, vma->anon_vma, 883 vma->vm_file, vma->vm_pgoff, 884 vma_policy(vma), 885 NULL_VM_UFFD_CTX, anon_vma_name(vma)); 886 if (prev) 887 vma = prev; 888 else 889 prev = vma; 890 vma->vm_flags = new_flags; 891 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 892 } 893 mmap_write_unlock(mm); 894 mmput(mm); 895 wakeup: 896 /* 897 * After no new page faults can wait on this fault_*wqh, flush 898 * the last page faults that may have been already waiting on 899 * the fault_*wqh. 900 */ 901 spin_lock_irq(&ctx->fault_pending_wqh.lock); 902 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); 903 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range); 904 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 905 906 /* Flush pending events that may still wait on event_wqh */ 907 wake_up_all(&ctx->event_wqh); 908 909 wake_up_poll(&ctx->fd_wqh, EPOLLHUP); 910 userfaultfd_ctx_put(ctx); 911 return 0; 912 } 913 914 /* fault_pending_wqh.lock must be hold by the caller */ 915 static inline struct userfaultfd_wait_queue *find_userfault_in( 916 wait_queue_head_t *wqh) 917 { 918 wait_queue_entry_t *wq; 919 struct userfaultfd_wait_queue *uwq; 920 921 lockdep_assert_held(&wqh->lock); 922 923 uwq = NULL; 924 if (!waitqueue_active(wqh)) 925 goto out; 926 /* walk in reverse to provide FIFO behavior to read userfaults */ 927 wq = list_last_entry(&wqh->head, typeof(*wq), entry); 928 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 929 out: 930 return uwq; 931 } 932 933 static inline struct userfaultfd_wait_queue *find_userfault( 934 struct userfaultfd_ctx *ctx) 935 { 936 return find_userfault_in(&ctx->fault_pending_wqh); 937 } 938 939 static inline struct userfaultfd_wait_queue *find_userfault_evt( 940 struct userfaultfd_ctx *ctx) 941 { 942 return find_userfault_in(&ctx->event_wqh); 943 } 944 945 static __poll_t userfaultfd_poll(struct file *file, poll_table *wait) 946 { 947 struct userfaultfd_ctx *ctx = file->private_data; 948 __poll_t ret; 949 950 poll_wait(file, &ctx->fd_wqh, wait); 951 952 if (!userfaultfd_is_initialized(ctx)) 953 return EPOLLERR; 954 955 /* 956 * poll() never guarantees that read won't block. 957 * userfaults can be waken before they're read(). 958 */ 959 if (unlikely(!(file->f_flags & O_NONBLOCK))) 960 return EPOLLERR; 961 /* 962 * lockless access to see if there are pending faults 963 * __pollwait last action is the add_wait_queue but 964 * the spin_unlock would allow the waitqueue_active to 965 * pass above the actual list_add inside 966 * add_wait_queue critical section. So use a full 967 * memory barrier to serialize the list_add write of 968 * add_wait_queue() with the waitqueue_active read 969 * below. 
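 *
 * Roughly, the interleaving being guarded against is (illustrative,
 * no new semantics intended):
 *
 *	poll CPU                        faulting CPU
 *	add_wait_queue(&ctx->fd_wqh)    __add_wait_queue(fault_pending_wqh)
 *	smp_mb()                        wake_up_poll(&ctx->fd_wqh)
 *	waitqueue_active(fault_pending_wqh)
 *
 * Without the full barrier the waitqueue_active() read could be
 * hoisted above the list_add done inside add_wait_queue(), letting
 * both sides miss each other and the wakeup be lost.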
970 */ 971 ret = 0; 972 smp_mb(); 973 if (waitqueue_active(&ctx->fault_pending_wqh)) 974 ret = EPOLLIN; 975 else if (waitqueue_active(&ctx->event_wqh)) 976 ret = EPOLLIN; 977 978 return ret; 979 } 980 981 static const struct file_operations userfaultfd_fops; 982 983 static int resolve_userfault_fork(struct userfaultfd_ctx *new, 984 struct inode *inode, 985 struct uffd_msg *msg) 986 { 987 int fd; 988 989 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new, 990 O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); 991 if (fd < 0) 992 return fd; 993 994 msg->arg.reserved.reserved1 = 0; 995 msg->arg.fork.ufd = fd; 996 return 0; 997 } 998 999 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, 1000 struct uffd_msg *msg, struct inode *inode) 1001 { 1002 ssize_t ret; 1003 DECLARE_WAITQUEUE(wait, current); 1004 struct userfaultfd_wait_queue *uwq; 1005 /* 1006 * Handling fork event requires sleeping operations, so 1007 * we drop the event_wqh lock, then do these ops, then 1008 * lock it back and wake up the waiter. While the lock is 1009 * dropped the ewq may go away so we keep track of it 1010 * carefully. 1011 */ 1012 LIST_HEAD(fork_event); 1013 struct userfaultfd_ctx *fork_nctx = NULL; 1014 1015 /* always take the fd_wqh lock before the fault_pending_wqh lock */ 1016 spin_lock_irq(&ctx->fd_wqh.lock); 1017 __add_wait_queue(&ctx->fd_wqh, &wait); 1018 for (;;) { 1019 set_current_state(TASK_INTERRUPTIBLE); 1020 spin_lock(&ctx->fault_pending_wqh.lock); 1021 uwq = find_userfault(ctx); 1022 if (uwq) { 1023 /* 1024 * Use a seqcount to repeat the lockless check 1025 * in wake_userfault() to avoid missing 1026 * wakeups because during the refile both 1027 * waitqueue could become empty if this is the 1028 * only userfault. 1029 */ 1030 write_seqcount_begin(&ctx->refile_seq); 1031 1032 /* 1033 * The fault_pending_wqh.lock prevents the uwq 1034 * to disappear from under us. 1035 * 1036 * Refile this userfault from 1037 * fault_pending_wqh to fault_wqh, it's not 1038 * pending anymore after we read it. 1039 * 1040 * Use list_del() by hand (as 1041 * userfaultfd_wake_function also uses 1042 * list_del_init() by hand) to be sure nobody 1043 * changes __remove_wait_queue() to use 1044 * list_del_init() in turn breaking the 1045 * !list_empty_careful() check in 1046 * handle_userfault(). The uwq->wq.head list 1047 * must never be empty at any time during the 1048 * refile, or the waitqueue could disappear 1049 * from under us. The "wait_queue_head_t" 1050 * parameter of __remove_wait_queue() is unused 1051 * anyway. 1052 */ 1053 list_del(&uwq->wq.entry); 1054 add_wait_queue(&ctx->fault_wqh, &uwq->wq); 1055 1056 write_seqcount_end(&ctx->refile_seq); 1057 1058 /* careful to always initialize msg if ret == 0 */ 1059 *msg = uwq->msg; 1060 spin_unlock(&ctx->fault_pending_wqh.lock); 1061 ret = 0; 1062 break; 1063 } 1064 spin_unlock(&ctx->fault_pending_wqh.lock); 1065 1066 spin_lock(&ctx->event_wqh.lock); 1067 uwq = find_userfault_evt(ctx); 1068 if (uwq) { 1069 *msg = uwq->msg; 1070 1071 if (uwq->msg.event == UFFD_EVENT_FORK) { 1072 fork_nctx = (struct userfaultfd_ctx *) 1073 (unsigned long) 1074 uwq->msg.arg.reserved.reserved1; 1075 list_move(&uwq->wq.entry, &fork_event); 1076 /* 1077 * fork_nctx can be freed as soon as 1078 * we drop the lock, unless we take a 1079 * reference on it. 
1080 */ 1081 userfaultfd_ctx_get(fork_nctx); 1082 spin_unlock(&ctx->event_wqh.lock); 1083 ret = 0; 1084 break; 1085 } 1086 1087 userfaultfd_event_complete(ctx, uwq); 1088 spin_unlock(&ctx->event_wqh.lock); 1089 ret = 0; 1090 break; 1091 } 1092 spin_unlock(&ctx->event_wqh.lock); 1093 1094 if (signal_pending(current)) { 1095 ret = -ERESTARTSYS; 1096 break; 1097 } 1098 if (no_wait) { 1099 ret = -EAGAIN; 1100 break; 1101 } 1102 spin_unlock_irq(&ctx->fd_wqh.lock); 1103 schedule(); 1104 spin_lock_irq(&ctx->fd_wqh.lock); 1105 } 1106 __remove_wait_queue(&ctx->fd_wqh, &wait); 1107 __set_current_state(TASK_RUNNING); 1108 spin_unlock_irq(&ctx->fd_wqh.lock); 1109 1110 if (!ret && msg->event == UFFD_EVENT_FORK) { 1111 ret = resolve_userfault_fork(fork_nctx, inode, msg); 1112 spin_lock_irq(&ctx->event_wqh.lock); 1113 if (!list_empty(&fork_event)) { 1114 /* 1115 * The fork thread didn't abort, so we can 1116 * drop the temporary refcount. 1117 */ 1118 userfaultfd_ctx_put(fork_nctx); 1119 1120 uwq = list_first_entry(&fork_event, 1121 typeof(*uwq), 1122 wq.entry); 1123 /* 1124 * If fork_event list wasn't empty and in turn 1125 * the event wasn't already released by fork 1126 * (the event is allocated on fork kernel 1127 * stack), put the event back to its place in 1128 * the event_wq. fork_event head will be freed 1129 * as soon as we return so the event cannot 1130 * stay queued there no matter the current 1131 * "ret" value. 1132 */ 1133 list_del(&uwq->wq.entry); 1134 __add_wait_queue(&ctx->event_wqh, &uwq->wq); 1135 1136 /* 1137 * Leave the event in the waitqueue and report 1138 * error to userland if we failed to resolve 1139 * the userfault fork. 1140 */ 1141 if (likely(!ret)) 1142 userfaultfd_event_complete(ctx, uwq); 1143 } else { 1144 /* 1145 * Here the fork thread aborted and the 1146 * refcount from the fork thread on fork_nctx 1147 * has already been released. We still hold 1148 * the reference we took before releasing the 1149 * lock above. If resolve_userfault_fork 1150 * failed we've to drop it because the 1151 * fork_nctx has to be freed in such case. If 1152 * it succeeded we'll hold it because the new 1153 * uffd references it. 1154 */ 1155 if (ret) 1156 userfaultfd_ctx_put(fork_nctx); 1157 } 1158 spin_unlock_irq(&ctx->event_wqh.lock); 1159 } 1160 1161 return ret; 1162 } 1163 1164 static ssize_t userfaultfd_read(struct file *file, char __user *buf, 1165 size_t count, loff_t *ppos) 1166 { 1167 struct userfaultfd_ctx *ctx = file->private_data; 1168 ssize_t _ret, ret = 0; 1169 struct uffd_msg msg; 1170 int no_wait = file->f_flags & O_NONBLOCK; 1171 struct inode *inode = file_inode(file); 1172 1173 if (!userfaultfd_is_initialized(ctx)) 1174 return -EINVAL; 1175 1176 for (;;) { 1177 if (count < sizeof(msg)) 1178 return ret ? ret : -EINVAL; 1179 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode); 1180 if (_ret < 0) 1181 return ret ? ret : _ret; 1182 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) 1183 return ret ? ret : -EFAULT; 1184 ret += sizeof(msg); 1185 buf += sizeof(msg); 1186 count -= sizeof(msg); 1187 /* 1188 * Allow to read more than one fault at time but only 1189 * block if waiting for the very first one. 
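 *
 * (Hedged illustration of the userland side, not mandated by this
 * file: a monitor typically reads whole messages in a loop, e.g.
 *
 *	struct uffd_msg msg;
 *	while (read(uffd, &msg, sizeof(msg)) == sizeof(msg))
 *		handle_msg(&msg);
 *
 * where handle_msg() is a hypothetical helper; buffers smaller than
 * one struct uffd_msg are rejected with -EINVAL, as coded above.)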
1190 */ 1191 no_wait = O_NONBLOCK; 1192 } 1193 } 1194 1195 static void __wake_userfault(struct userfaultfd_ctx *ctx, 1196 struct userfaultfd_wake_range *range) 1197 { 1198 spin_lock_irq(&ctx->fault_pending_wqh.lock); 1199 /* wake all in the range and autoremove */ 1200 if (waitqueue_active(&ctx->fault_pending_wqh)) 1201 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 1202 range); 1203 if (waitqueue_active(&ctx->fault_wqh)) 1204 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); 1205 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 1206 } 1207 1208 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, 1209 struct userfaultfd_wake_range *range) 1210 { 1211 unsigned seq; 1212 bool need_wakeup; 1213 1214 /* 1215 * To be sure waitqueue_active() is not reordered by the CPU 1216 * before the pagetable update, use an explicit SMP memory 1217 * barrier here. PT lock release or mmap_read_unlock(mm) still 1218 * have release semantics that can allow the 1219 * waitqueue_active() to be reordered before the pte update. 1220 */ 1221 smp_mb(); 1222 1223 /* 1224 * Use waitqueue_active because it's very frequent to 1225 * change the address space atomically even if there are no 1226 * userfaults yet. So we take the spinlock only when we're 1227 * sure we've userfaults to wake. 1228 */ 1229 do { 1230 seq = read_seqcount_begin(&ctx->refile_seq); 1231 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || 1232 waitqueue_active(&ctx->fault_wqh); 1233 cond_resched(); 1234 } while (read_seqcount_retry(&ctx->refile_seq, seq)); 1235 if (need_wakeup) 1236 __wake_userfault(ctx, range); 1237 } 1238 1239 static __always_inline int validate_range(struct mm_struct *mm, 1240 __u64 start, __u64 len) 1241 { 1242 __u64 task_size = mm->task_size; 1243 1244 if (start & ~PAGE_MASK) 1245 return -EINVAL; 1246 if (len & ~PAGE_MASK) 1247 return -EINVAL; 1248 if (!len) 1249 return -EINVAL; 1250 if (start < mmap_min_addr) 1251 return -EINVAL; 1252 if (start >= task_size) 1253 return -EINVAL; 1254 if (len > task_size - start) 1255 return -EINVAL; 1256 return 0; 1257 } 1258 1259 static int userfaultfd_register(struct userfaultfd_ctx *ctx, 1260 unsigned long arg) 1261 { 1262 struct mm_struct *mm = ctx->mm; 1263 struct vm_area_struct *vma, *prev, *cur; 1264 int ret; 1265 struct uffdio_register uffdio_register; 1266 struct uffdio_register __user *user_uffdio_register; 1267 unsigned long vm_flags, new_flags; 1268 bool found; 1269 bool basic_ioctls; 1270 unsigned long start, end, vma_end; 1271 1272 user_uffdio_register = (struct uffdio_register __user *) arg; 1273 1274 ret = -EFAULT; 1275 if (copy_from_user(&uffdio_register, user_uffdio_register, 1276 sizeof(uffdio_register)-sizeof(__u64))) 1277 goto out; 1278 1279 ret = -EINVAL; 1280 if (!uffdio_register.mode) 1281 goto out; 1282 if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) 1283 goto out; 1284 vm_flags = 0; 1285 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) 1286 vm_flags |= VM_UFFD_MISSING; 1287 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { 1288 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1289 goto out; 1290 #endif 1291 vm_flags |= VM_UFFD_WP; 1292 } 1293 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { 1294 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 1295 goto out; 1296 #endif 1297 vm_flags |= VM_UFFD_MINOR; 1298 } 1299 1300 ret = validate_range(mm, uffdio_register.range.start, 1301 uffdio_register.range.len); 1302 if (ret) 1303 goto out; 1304 1305 start = uffdio_register.range.start; 1306 end = start + 
uffdio_register.range.len; 1307 1308 ret = -ENOMEM; 1309 if (!mmget_not_zero(mm)) 1310 goto out; 1311 1312 mmap_write_lock(mm); 1313 vma = find_vma_prev(mm, start, &prev); 1314 if (!vma) 1315 goto out_unlock; 1316 1317 /* check that there's at least one vma in the range */ 1318 ret = -EINVAL; 1319 if (vma->vm_start >= end) 1320 goto out_unlock; 1321 1322 /* 1323 * If the first vma contains huge pages, make sure start address 1324 * is aligned to huge page size. 1325 */ 1326 if (is_vm_hugetlb_page(vma)) { 1327 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1328 1329 if (start & (vma_hpagesize - 1)) 1330 goto out_unlock; 1331 } 1332 1333 /* 1334 * Search for not compatible vmas. 1335 */ 1336 found = false; 1337 basic_ioctls = false; 1338 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { 1339 cond_resched(); 1340 1341 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 1342 !!(cur->vm_flags & __VM_UFFD_FLAGS)); 1343 1344 /* check not compatible vmas */ 1345 ret = -EINVAL; 1346 if (!vma_can_userfault(cur, vm_flags)) 1347 goto out_unlock; 1348 1349 /* 1350 * UFFDIO_COPY will fill file holes even without 1351 * PROT_WRITE. This check enforces that if this is a 1352 * MAP_SHARED, the process has write permission to the backing 1353 * file. If VM_MAYWRITE is set it also enforces that on a 1354 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further 1355 * F_WRITE_SEAL can be taken until the vma is destroyed. 1356 */ 1357 ret = -EPERM; 1358 if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) 1359 goto out_unlock; 1360 1361 /* 1362 * If this vma contains ending address, and huge pages 1363 * check alignment. 1364 */ 1365 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && 1366 end > cur->vm_start) { 1367 unsigned long vma_hpagesize = vma_kernel_pagesize(cur); 1368 1369 ret = -EINVAL; 1370 1371 if (end & (vma_hpagesize - 1)) 1372 goto out_unlock; 1373 } 1374 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) 1375 goto out_unlock; 1376 1377 /* 1378 * Check that this vma isn't already owned by a 1379 * different userfaultfd. We can't allow more than one 1380 * userfaultfd to own a single vma simultaneously or we 1381 * wouldn't know which one to deliver the userfaults to. 1382 */ 1383 ret = -EBUSY; 1384 if (cur->vm_userfaultfd_ctx.ctx && 1385 cur->vm_userfaultfd_ctx.ctx != ctx) 1386 goto out_unlock; 1387 1388 /* 1389 * Note vmas containing huge pages 1390 */ 1391 if (is_vm_hugetlb_page(cur)) 1392 basic_ioctls = true; 1393 1394 found = true; 1395 } 1396 BUG_ON(!found); 1397 1398 if (vma->vm_start < start) 1399 prev = vma; 1400 1401 ret = 0; 1402 do { 1403 cond_resched(); 1404 1405 BUG_ON(!vma_can_userfault(vma, vm_flags)); 1406 BUG_ON(vma->vm_userfaultfd_ctx.ctx && 1407 vma->vm_userfaultfd_ctx.ctx != ctx); 1408 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1409 1410 /* 1411 * Nothing to do: this vma is already registered into this 1412 * userfaultfd and with the right tracking mode too. 
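 *
 * (Hedged userspace-side illustration, not part of this file: the
 * registration that drives this loop typically looks like
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = addr, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 * with addr and len page aligned as validate_range() enforces, and
 * with reg.ioctls reporting afterwards which range ioctls are
 * guaranteed to work on the registered range.)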
1413 */ 1414 if (vma->vm_userfaultfd_ctx.ctx == ctx && 1415 (vma->vm_flags & vm_flags) == vm_flags) 1416 goto skip; 1417 1418 if (vma->vm_start > start) 1419 start = vma->vm_start; 1420 vma_end = min(end, vma->vm_end); 1421 1422 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; 1423 prev = vma_merge(mm, prev, start, vma_end, new_flags, 1424 vma->anon_vma, vma->vm_file, vma->vm_pgoff, 1425 vma_policy(vma), 1426 ((struct vm_userfaultfd_ctx){ ctx }), 1427 anon_vma_name(vma)); 1428 if (prev) { 1429 vma = prev; 1430 goto next; 1431 } 1432 if (vma->vm_start < start) { 1433 ret = split_vma(mm, vma, start, 1); 1434 if (ret) 1435 break; 1436 } 1437 if (vma->vm_end > end) { 1438 ret = split_vma(mm, vma, end, 0); 1439 if (ret) 1440 break; 1441 } 1442 next: 1443 /* 1444 * In the vma_merge() successful mprotect-like case 8: 1445 * the next vma was merged into the current one and 1446 * the current one has not been updated yet. 1447 */ 1448 vma->vm_flags = new_flags; 1449 vma->vm_userfaultfd_ctx.ctx = ctx; 1450 1451 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) 1452 hugetlb_unshare_all_pmds(vma); 1453 1454 skip: 1455 prev = vma; 1456 start = vma->vm_end; 1457 vma = vma->vm_next; 1458 } while (vma && vma->vm_start < end); 1459 out_unlock: 1460 mmap_write_unlock(mm); 1461 mmput(mm); 1462 if (!ret) { 1463 __u64 ioctls_out; 1464 1465 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : 1466 UFFD_API_RANGE_IOCTLS; 1467 1468 /* 1469 * Declare the WP ioctl only if the WP mode is 1470 * specified and all checks passed with the range 1471 */ 1472 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) 1473 ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); 1474 1475 /* CONTINUE ioctl is only supported for MINOR ranges. */ 1476 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) 1477 ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); 1478 1479 /* 1480 * Now that we scanned all vmas we can already tell 1481 * userland which ioctls methods are guaranteed to 1482 * succeed on this range. 1483 */ 1484 if (put_user(ioctls_out, &user_uffdio_register->ioctls)) 1485 ret = -EFAULT; 1486 } 1487 out: 1488 return ret; 1489 } 1490 1491 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, 1492 unsigned long arg) 1493 { 1494 struct mm_struct *mm = ctx->mm; 1495 struct vm_area_struct *vma, *prev, *cur; 1496 int ret; 1497 struct uffdio_range uffdio_unregister; 1498 unsigned long new_flags; 1499 bool found; 1500 unsigned long start, end, vma_end; 1501 const void __user *buf = (void __user *)arg; 1502 1503 ret = -EFAULT; 1504 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) 1505 goto out; 1506 1507 ret = validate_range(mm, uffdio_unregister.start, 1508 uffdio_unregister.len); 1509 if (ret) 1510 goto out; 1511 1512 start = uffdio_unregister.start; 1513 end = start + uffdio_unregister.len; 1514 1515 ret = -ENOMEM; 1516 if (!mmget_not_zero(mm)) 1517 goto out; 1518 1519 mmap_write_lock(mm); 1520 vma = find_vma_prev(mm, start, &prev); 1521 if (!vma) 1522 goto out_unlock; 1523 1524 /* check that there's at least one vma in the range */ 1525 ret = -EINVAL; 1526 if (vma->vm_start >= end) 1527 goto out_unlock; 1528 1529 /* 1530 * If the first vma contains huge pages, make sure start address 1531 * is aligned to huge page size. 1532 */ 1533 if (is_vm_hugetlb_page(vma)) { 1534 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1535 1536 if (start & (vma_hpagesize - 1)) 1537 goto out_unlock; 1538 } 1539 1540 /* 1541 * Search for not compatible vmas. 
1542 */ 1543 found = false; 1544 ret = -EINVAL; 1545 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { 1546 cond_resched(); 1547 1548 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 1549 !!(cur->vm_flags & __VM_UFFD_FLAGS)); 1550 1551 /* 1552 * Check not compatible vmas, not strictly required 1553 * here as not compatible vmas cannot have a 1554 * userfaultfd_ctx registered on them, but this 1555 * provides for more strict behavior to notice 1556 * unregistration errors. 1557 */ 1558 if (!vma_can_userfault(cur, cur->vm_flags)) 1559 goto out_unlock; 1560 1561 found = true; 1562 } 1563 BUG_ON(!found); 1564 1565 if (vma->vm_start < start) 1566 prev = vma; 1567 1568 ret = 0; 1569 do { 1570 cond_resched(); 1571 1572 BUG_ON(!vma_can_userfault(vma, vma->vm_flags)); 1573 1574 /* 1575 * Nothing to do: this vma is not registered with any 1576 * userfaultfd, so there is nothing to unregister here. 1577 */ 1578 if (!vma->vm_userfaultfd_ctx.ctx) 1579 goto skip; 1580 1581 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1582 1583 if (vma->vm_start > start) 1584 start = vma->vm_start; 1585 vma_end = min(end, vma->vm_end); 1586 1587 if (userfaultfd_missing(vma)) { 1588 /* 1589 * Wake any concurrent pending userfault while 1590 * we unregister, so they will not hang 1591 * permanently and it spares userland from having 1592 * to call UFFDIO_WAKE explicitly. 1593 */ 1594 struct userfaultfd_wake_range range; 1595 range.start = start; 1596 range.len = vma_end - start; 1597 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); 1598 } 1599 1600 /* Reset ptes for the whole vma range if wr-protected */ 1601 if (userfaultfd_wp(vma)) 1602 uffd_wp_range(mm, vma, start, vma_end - start, false); 1603 1604 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; 1605 prev = vma_merge(mm, prev, start, vma_end, new_flags, 1606 vma->anon_vma, vma->vm_file, vma->vm_pgoff, 1607 vma_policy(vma), 1608 NULL_VM_UFFD_CTX, anon_vma_name(vma)); 1609 if (prev) { 1610 vma = prev; 1611 goto next; 1612 } 1613 if (vma->vm_start < start) { 1614 ret = split_vma(mm, vma, start, 1); 1615 if (ret) 1616 break; 1617 } 1618 if (vma->vm_end > end) { 1619 ret = split_vma(mm, vma, end, 0); 1620 if (ret) 1621 break; 1622 } 1623 next: 1624 /* 1625 * In the vma_merge() successful mprotect-like case 8: 1626 * the next vma was merged into the current one and 1627 * the current one has not been updated yet. 1628 */ 1629 vma->vm_flags = new_flags; 1630 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 1631 1632 skip: 1633 prev = vma; 1634 start = vma->vm_end; 1635 vma = vma->vm_next; 1636 } while (vma && vma->vm_start < end); 1637 out_unlock: 1638 mmap_write_unlock(mm); 1639 mmput(mm); 1640 out: 1641 return ret; 1642 } 1643 1644 /* 1645 * userfaultfd_wake may be used in combination with the 1646 * UFFDIO_*_MODE_DONTWAKE to wake up userfaults in batches. 1647 */ 1648 static int userfaultfd_wake(struct userfaultfd_ctx *ctx, 1649 unsigned long arg) 1650 { 1651 int ret; 1652 struct uffdio_range uffdio_wake; 1653 struct userfaultfd_wake_range range; 1654 const void __user *buf = (void __user *)arg; 1655 1656 ret = -EFAULT; 1657 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) 1658 goto out; 1659 1660 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); 1661 if (ret) 1662 goto out; 1663 1664 range.start = uffdio_wake.start; 1665 range.len = uffdio_wake.len; 1666 1667 /* 1668 * len == 0 means wake all and we don't want to wake all here, 1669 * so check it again to be sure.
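 *
 * (Hedged userspace-side illustration: a monitor that resolved faults
 * with UFFDIO_COPY_MODE_DONTWAKE and friends later releases the
 * waiters in one batch with
 *
 *	struct uffdio_range r = { .start = addr, .len = len };
 *	ioctl(uffd, UFFDIO_WAKE, &r);
 *
 * where len is a non-zero multiple of the page size, as the
 * validate_range() call above already guarantees.)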
1670 */ 1671 VM_BUG_ON(!range.len); 1672 1673 wake_userfault(ctx, &range); 1674 ret = 0; 1675 1676 out: 1677 return ret; 1678 } 1679 1680 static int userfaultfd_copy(struct userfaultfd_ctx *ctx, 1681 unsigned long arg) 1682 { 1683 __s64 ret; 1684 struct uffdio_copy uffdio_copy; 1685 struct uffdio_copy __user *user_uffdio_copy; 1686 struct userfaultfd_wake_range range; 1687 1688 user_uffdio_copy = (struct uffdio_copy __user *) arg; 1689 1690 ret = -EAGAIN; 1691 if (atomic_read(&ctx->mmap_changing)) 1692 goto out; 1693 1694 ret = -EFAULT; 1695 if (copy_from_user(&uffdio_copy, user_uffdio_copy, 1696 /* don't copy "copy" last field */ 1697 sizeof(uffdio_copy)-sizeof(__s64))) 1698 goto out; 1699 1700 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); 1701 if (ret) 1702 goto out; 1703 /* 1704 * double check for wraparound just in case. copy_from_user() 1705 * will later check uffdio_copy.src + uffdio_copy.len to fit 1706 * in the userland range. 1707 */ 1708 ret = -EINVAL; 1709 if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) 1710 goto out; 1711 if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) 1712 goto out; 1713 if (mmget_not_zero(ctx->mm)) { 1714 ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, 1715 uffdio_copy.len, &ctx->mmap_changing, 1716 uffdio_copy.mode); 1717 mmput(ctx->mm); 1718 } else { 1719 return -ESRCH; 1720 } 1721 if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1722 return -EFAULT; 1723 if (ret < 0) 1724 goto out; 1725 BUG_ON(!ret); 1726 /* len == 0 would wake all */ 1727 range.len = ret; 1728 if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { 1729 range.start = uffdio_copy.dst; 1730 wake_userfault(ctx, &range); 1731 } 1732 ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; 1733 out: 1734 return ret; 1735 } 1736 1737 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, 1738 unsigned long arg) 1739 { 1740 __s64 ret; 1741 struct uffdio_zeropage uffdio_zeropage; 1742 struct uffdio_zeropage __user *user_uffdio_zeropage; 1743 struct userfaultfd_wake_range range; 1744 1745 user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; 1746 1747 ret = -EAGAIN; 1748 if (atomic_read(&ctx->mmap_changing)) 1749 goto out; 1750 1751 ret = -EFAULT; 1752 if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, 1753 /* don't copy "zeropage" last field */ 1754 sizeof(uffdio_zeropage)-sizeof(__s64))) 1755 goto out; 1756 1757 ret = validate_range(ctx->mm, uffdio_zeropage.range.start, 1758 uffdio_zeropage.range.len); 1759 if (ret) 1760 goto out; 1761 ret = -EINVAL; 1762 if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) 1763 goto out; 1764 1765 if (mmget_not_zero(ctx->mm)) { 1766 ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, 1767 uffdio_zeropage.range.len, 1768 &ctx->mmap_changing); 1769 mmput(ctx->mm); 1770 } else { 1771 return -ESRCH; 1772 } 1773 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1774 return -EFAULT; 1775 if (ret < 0) 1776 goto out; 1777 /* len == 0 would wake all */ 1778 BUG_ON(!ret); 1779 range.len = ret; 1780 if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { 1781 range.start = uffdio_zeropage.range.start; 1782 wake_userfault(ctx, &range); 1783 } 1784 ret = range.len == uffdio_zeropage.range.len ? 
0 : -EAGAIN; 1785 out: 1786 return ret; 1787 } 1788 1789 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, 1790 unsigned long arg) 1791 { 1792 int ret; 1793 struct uffdio_writeprotect uffdio_wp; 1794 struct uffdio_writeprotect __user *user_uffdio_wp; 1795 struct userfaultfd_wake_range range; 1796 bool mode_wp, mode_dontwake; 1797 1798 if (atomic_read(&ctx->mmap_changing)) 1799 return -EAGAIN; 1800 1801 user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; 1802 1803 if (copy_from_user(&uffdio_wp, user_uffdio_wp, 1804 sizeof(struct uffdio_writeprotect))) 1805 return -EFAULT; 1806 1807 ret = validate_range(ctx->mm, uffdio_wp.range.start, 1808 uffdio_wp.range.len); 1809 if (ret) 1810 return ret; 1811 1812 if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | 1813 UFFDIO_WRITEPROTECT_MODE_WP)) 1814 return -EINVAL; 1815 1816 mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; 1817 mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; 1818 1819 if (mode_wp && mode_dontwake) 1820 return -EINVAL; 1821 1822 if (mmget_not_zero(ctx->mm)) { 1823 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, 1824 uffdio_wp.range.len, mode_wp, 1825 &ctx->mmap_changing); 1826 mmput(ctx->mm); 1827 } else { 1828 return -ESRCH; 1829 } 1830 1831 if (ret) 1832 return ret; 1833 1834 if (!mode_wp && !mode_dontwake) { 1835 range.start = uffdio_wp.range.start; 1836 range.len = uffdio_wp.range.len; 1837 wake_userfault(ctx, &range); 1838 } 1839 return ret; 1840 } 1841 1842 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) 1843 { 1844 __s64 ret; 1845 struct uffdio_continue uffdio_continue; 1846 struct uffdio_continue __user *user_uffdio_continue; 1847 struct userfaultfd_wake_range range; 1848 1849 user_uffdio_continue = (struct uffdio_continue __user *)arg; 1850 1851 ret = -EAGAIN; 1852 if (atomic_read(&ctx->mmap_changing)) 1853 goto out; 1854 1855 ret = -EFAULT; 1856 if (copy_from_user(&uffdio_continue, user_uffdio_continue, 1857 /* don't copy the output fields */ 1858 sizeof(uffdio_continue) - (sizeof(__s64)))) 1859 goto out; 1860 1861 ret = validate_range(ctx->mm, uffdio_continue.range.start, 1862 uffdio_continue.range.len); 1863 if (ret) 1864 goto out; 1865 1866 ret = -EINVAL; 1867 /* double check for wraparound just in case. */ 1868 if (uffdio_continue.range.start + uffdio_continue.range.len <= 1869 uffdio_continue.range.start) { 1870 goto out; 1871 } 1872 if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE) 1873 goto out; 1874 1875 if (mmget_not_zero(ctx->mm)) { 1876 ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, 1877 uffdio_continue.range.len, 1878 &ctx->mmap_changing); 1879 mmput(ctx->mm); 1880 } else { 1881 return -ESRCH; 1882 } 1883 1884 if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) 1885 return -EFAULT; 1886 if (ret < 0) 1887 goto out; 1888 1889 /* len == 0 would wake all */ 1890 BUG_ON(!ret); 1891 range.len = ret; 1892 if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { 1893 range.start = uffdio_continue.range.start; 1894 wake_userfault(ctx, &range); 1895 } 1896 ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN; 1897 1898 out: 1899 return ret; 1900 } 1901 1902 static inline unsigned int uffd_ctx_features(__u64 user_features) 1903 { 1904 /* 1905 * For the current set of features the bits just coincide. Set 1906 * UFFD_FEATURE_INITIALIZED to mark the features as enabled. 
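 *
 * (Hedged userspace-side illustration: the handshake that ends up
 * storing these bits is typically
 *
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 * after which api.features reports the features this kernel actually
 * supports and api.ioctls the always-available ioctls.)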
1907 */ 1908 return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; 1909 } 1910 1911 /* 1912 * userland asks for a certain API version and we return which bits 1913 * and ioctl commands are implemented in this kernel for such API 1914 * version or -EINVAL if unknown. 1915 */ 1916 static int userfaultfd_api(struct userfaultfd_ctx *ctx, 1917 unsigned long arg) 1918 { 1919 struct uffdio_api uffdio_api; 1920 void __user *buf = (void __user *)arg; 1921 unsigned int ctx_features; 1922 int ret; 1923 __u64 features; 1924 1925 ret = -EFAULT; 1926 if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) 1927 goto out; 1928 /* Ignore unsupported features (userspace built against newer kernel) */ 1929 features = uffdio_api.features & UFFD_API_FEATURES; 1930 ret = -EPERM; 1931 if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) 1932 goto err_out; 1933 /* report all available features and ioctls to userland */ 1934 uffdio_api.features = UFFD_API_FEATURES; 1935 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 1936 uffdio_api.features &= 1937 ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM); 1938 #endif 1939 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1940 uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; 1941 #endif 1942 #ifndef CONFIG_PTE_MARKER_UFFD_WP 1943 uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; 1944 #endif 1945 uffdio_api.ioctls = UFFD_API_IOCTLS; 1946 ret = -EFAULT; 1947 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) 1948 goto out; 1949 1950 /* only enable the requested features for this uffd context */ 1951 ctx_features = uffd_ctx_features(features); 1952 ret = -EINVAL; 1953 if (cmpxchg(&ctx->features, 0, ctx_features) != 0) 1954 goto err_out; 1955 1956 ret = 0; 1957 out: 1958 return ret; 1959 err_out: 1960 memset(&uffdio_api, 0, sizeof(uffdio_api)); 1961 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) 1962 ret = -EFAULT; 1963 goto out; 1964 } 1965 1966 static long userfaultfd_ioctl(struct file *file, unsigned cmd, 1967 unsigned long arg) 1968 { 1969 int ret = -EINVAL; 1970 struct userfaultfd_ctx *ctx = file->private_data; 1971 1972 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) 1973 return -EINVAL; 1974 1975 switch(cmd) { 1976 case UFFDIO_API: 1977 ret = userfaultfd_api(ctx, arg); 1978 break; 1979 case UFFDIO_REGISTER: 1980 ret = userfaultfd_register(ctx, arg); 1981 break; 1982 case UFFDIO_UNREGISTER: 1983 ret = userfaultfd_unregister(ctx, arg); 1984 break; 1985 case UFFDIO_WAKE: 1986 ret = userfaultfd_wake(ctx, arg); 1987 break; 1988 case UFFDIO_COPY: 1989 ret = userfaultfd_copy(ctx, arg); 1990 break; 1991 case UFFDIO_ZEROPAGE: 1992 ret = userfaultfd_zeropage(ctx, arg); 1993 break; 1994 case UFFDIO_WRITEPROTECT: 1995 ret = userfaultfd_writeprotect(ctx, arg); 1996 break; 1997 case UFFDIO_CONTINUE: 1998 ret = userfaultfd_continue(ctx, arg); 1999 break; 2000 } 2001 return ret; 2002 } 2003 2004 #ifdef CONFIG_PROC_FS 2005 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) 2006 { 2007 struct userfaultfd_ctx *ctx = f->private_data; 2008 wait_queue_entry_t *wq; 2009 unsigned long pending = 0, total = 0; 2010 2011 spin_lock_irq(&ctx->fault_pending_wqh.lock); 2012 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { 2013 pending++; 2014 total++; 2015 } 2016 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { 2017 total++; 2018 } 2019 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 2020 2021 /* 2022 * If more protocols will be added, there will be all shown 2023 * separated by a space. 
Like this: 2024 * protocols: aa:... bb:... 2025 */ 2026 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", 2027 pending, total, UFFD_API, ctx->features, 2028 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); 2029 } 2030 #endif 2031 2032 static const struct file_operations userfaultfd_fops = { 2033 #ifdef CONFIG_PROC_FS 2034 .show_fdinfo = userfaultfd_show_fdinfo, 2035 #endif 2036 .release = userfaultfd_release, 2037 .poll = userfaultfd_poll, 2038 .read = userfaultfd_read, 2039 .unlocked_ioctl = userfaultfd_ioctl, 2040 .compat_ioctl = compat_ptr_ioctl, 2041 .llseek = noop_llseek, 2042 }; 2043 2044 static void init_once_userfaultfd_ctx(void *mem) 2045 { 2046 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; 2047 2048 init_waitqueue_head(&ctx->fault_pending_wqh); 2049 init_waitqueue_head(&ctx->fault_wqh); 2050 init_waitqueue_head(&ctx->event_wqh); 2051 init_waitqueue_head(&ctx->fd_wqh); 2052 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); 2053 } 2054 2055 static int new_userfaultfd(int flags) 2056 { 2057 struct userfaultfd_ctx *ctx; 2058 int fd; 2059 2060 BUG_ON(!current->mm); 2061 2062 /* Check the UFFD_* constants for consistency. */ 2063 BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS); 2064 BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); 2065 BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); 2066 2067 if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY)) 2068 return -EINVAL; 2069 2070 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); 2071 if (!ctx) 2072 return -ENOMEM; 2073 2074 refcount_set(&ctx->refcount, 1); 2075 ctx->flags = flags; 2076 ctx->features = 0; 2077 ctx->released = false; 2078 atomic_set(&ctx->mmap_changing, 0); 2079 ctx->mm = current->mm; 2080 /* prevent the mm struct to be freed */ 2081 mmgrab(ctx->mm); 2082 2083 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, 2084 O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); 2085 if (fd < 0) { 2086 mmdrop(ctx->mm); 2087 kmem_cache_free(userfaultfd_ctx_cachep, ctx); 2088 } 2089 return fd; 2090 } 2091 2092 static inline bool userfaultfd_syscall_allowed(int flags) 2093 { 2094 /* Userspace-only page faults are always allowed */ 2095 if (flags & UFFD_USER_MODE_ONLY) 2096 return true; 2097 2098 /* 2099 * The user is requesting a userfaultfd which can handle kernel faults. 2100 * Privileged users are always allowed to do this. 2101 */ 2102 if (capable(CAP_SYS_PTRACE)) 2103 return true; 2104 2105 /* Otherwise, access to kernel fault handling is sysctl controlled. 
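 *
 * (Hedged illustration: an unprivileged process that only needs to
 * trap user-mode faults can still do
 *
 *	int uffd = syscall(__NR_userfaultfd,
 *			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *
 * whereas handling kernel-originated faults additionally requires
 * CAP_SYS_PTRACE or the vm.unprivileged_userfaultfd sysctl.)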
*/ 2106 return sysctl_unprivileged_userfaultfd; 2107 } 2108 2109 SYSCALL_DEFINE1(userfaultfd, int, flags) 2110 { 2111 if (!userfaultfd_syscall_allowed(flags)) 2112 return -EPERM; 2113 2114 return new_userfaultfd(flags); 2115 } 2116 2117 static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags) 2118 { 2119 if (cmd != USERFAULTFD_IOC_NEW) 2120 return -EINVAL; 2121 2122 return new_userfaultfd(flags); 2123 } 2124 2125 static const struct file_operations userfaultfd_dev_fops = { 2126 .unlocked_ioctl = userfaultfd_dev_ioctl, 2127 .compat_ioctl = userfaultfd_dev_ioctl, 2128 .owner = THIS_MODULE, 2129 .llseek = noop_llseek, 2130 }; 2131 2132 static struct miscdevice userfaultfd_misc = { 2133 .minor = MISC_DYNAMIC_MINOR, 2134 .name = "userfaultfd", 2135 .fops = &userfaultfd_dev_fops 2136 }; 2137 2138 static int __init userfaultfd_init(void) 2139 { 2140 int ret; 2141 2142 ret = misc_register(&userfaultfd_misc); 2143 if (ret) 2144 return ret; 2145 2146 userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", 2147 sizeof(struct userfaultfd_ctx), 2148 0, 2149 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2150 init_once_userfaultfd_ctx); 2151 return 0; 2152 } 2153 __initcall(userfaultfd_init); 2154
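/*
 * Hedged end-to-end sketch of the userland side of this interface, for
 * illustration only: error handling is omitted, and area, area_len,
 * page_size and resolve_page() are assumptions of the sketch rather
 * than definitions from this file.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
 *	int uffd = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC);
 *
 *	struct uffdio_api api = { .api = UFFD_API };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)area, .len = area_len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	struct uffd_msg msg;
 *	while (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
 *		if (msg.event != UFFD_EVENT_PAGEFAULT)
 *			continue;
 *		struct uffdio_copy copy = {
 *			.dst = msg.arg.pagefault.address & ~(page_size - 1),
 *			.src = (unsigned long)resolve_page(),
 *			.len = page_size,
 *		};
 *		ioctl(uffd, UFFDIO_COPY, &copy);
 *	}
 */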