--- userfaultfd.c (aec44e0f0213e36d4f0868a80cdc5097a510f79d)
+++ userfaultfd.c (f619147104c8ea71e120e4936d2b68ec11a1e527)
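Context for reading the diff: the old revision threads a bool zeropage flag through the mcopy path; the new revision replaces it with a three-valued copy mode so that the new UFFDIO_CONTINUE path can reuse __mcopy_atomic(). The enum itself is declared outside this file; the sketch below shows what the new revision presumably defines in include/linux/userfaultfd_k.h (an assumption, since that header is not part of this diff):

/* Assumed definition; not shown in this diff. */
enum mcopy_atomic_mode {
        /* A normal copy_from_user into the destination range. */
        MCOPY_ATOMIC_NORMAL,
        /* Don't copy; map the destination range to the zero page. */
        MCOPY_ATOMIC_ZEROPAGE,
        /* Just install pte(s) with the existing page(s) in the page cache. */
        MCOPY_ATOMIC_CONTINUE,
};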
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>

--- 193 unchanged lines hidden ---

 * __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                                      struct vm_area_struct *dst_vma,
                                                      unsigned long dst_start,
                                                      unsigned long src_start,
                                                      unsigned long len,
-                                                     bool zeropage)
+                                                     enum mcopy_atomic_mode mode)
{
        int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
        ssize_t err;
        pte_t *dst_pte;
        unsigned long src_addr, dst_addr;
        long copied;
        struct page *page;
        unsigned long vma_hpagesize;
        pgoff_t idx;
        u32 hash;
        struct address_space *mapping;

        /*
         * There is no default zero huge page for all huge page sizes as
         * supported by hugetlb. A PMD_SIZE huge page may exist as used
         * by THP. Since we cannot reliably insert a zero page, this
         * feature is not supported.
         */
-        if (zeropage) {
+        if (mode == MCOPY_ATOMIC_ZEROPAGE) {
                mmap_read_unlock(dst_mm);
                return -EINVAL;
        }

        src_addr = src_start;
        dst_addr = dst_start;
        copied = 0;
        page = NULL;

--- 29 unchanged lines hidden ---

         */
        err = -ENOMEM;
        if (!vm_shared) {
                if (unlikely(anon_vma_prepare(dst_vma)))
                        goto out_unlock;
        }

        while (src_addr < src_start + len) {
-                pte_t dst_pteval;
-
                BUG_ON(dst_addr >= dst_start + len);

                /*
                 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
                 * i_mmap_rwsem ensures the dst_pte remains valid even
                 * in the case of shared pmds. fault mutex prevents
                 * races with other faulting threads.
                 */

--- 6 unchanged lines hidden ---

                err = -ENOMEM;
                dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
                if (!dst_pte) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        i_mmap_unlock_read(mapping);
                        goto out_unlock;
                }

-                err = -EEXIST;
-                dst_pteval = huge_ptep_get(dst_pte);
-                if (!huge_pte_none(dst_pteval)) {
+                if (mode != MCOPY_ATOMIC_CONTINUE &&
+                    !huge_pte_none(huge_ptep_get(dst_pte))) {
+                        err = -EEXIST;
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        i_mmap_unlock_read(mapping);
                        goto out_unlock;
                }

                err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
-                                               dst_addr, src_addr, &page);
+                                               dst_addr, src_addr, mode, &page);

                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                i_mmap_unlock_read(mapping);
                vm_alloc_shared = vm_shared;

                cond_resched();

                if (unlikely(err == -ENOENT)) {

--- 85 unchanged lines hidden ---

}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                      struct vm_area_struct *dst_vma,
                                      unsigned long dst_start,
                                      unsigned long src_start,
                                      unsigned long len,
-                                      bool zeropage);
+                                      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
                                                pmd_t *dst_pmd,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **page,

--- 33 unchanged lines hidden ---

        return err;
}

static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
-                                              bool zeropage,
+                                              enum mcopy_atomic_mode mcopy_mode,
                                              bool *mmap_changing,
                                              __u64 mode)
{
        struct vm_area_struct *dst_vma;
        ssize_t err;
        pmd_t *dst_pmd;
        unsigned long src_addr, dst_addr;
        long copied;
        struct page *page;
        bool wp_copy;
+        bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);

        /*
         * Sanitize the command parameters:
         */
        BUG_ON(dst_start & ~PAGE_MASK);
        BUG_ON(len & ~PAGE_MASK);

        /* Does the address range wrap, or is the span zero-sized? */

--- 42 unchanged lines hidden ---

        if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
                goto out_unlock;

        /*
         * If this is a HUGETLB vma, pass off to the appropriate routine.
         */
        if (is_vm_hugetlb_page(dst_vma))
                return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
-                                              src_start, len, zeropage);
+                                              src_start, len, mcopy_mode);

        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;
+        if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
+                goto out_unlock;

        /*
         * Ensure the dst_vma has an anon_vma or this page
         * would get a NULL anon_vma when moved in the
         * dst_vma.
         */
        err = -ENOMEM;
        if (!(dst_vma->vm_flags & VM_SHARED) &&

--- 79 unchanged lines hidden ---

        BUG_ON(!copied && !err);
        return copied ? copied : err;
}

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
                     unsigned long src_start, unsigned long len,
                     bool *mmap_changing, __u64 mode)
{
-        return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
-                              mmap_changing, mode);
+        return __mcopy_atomic(dst_mm, dst_start, src_start, len,
+                              MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
                       unsigned long len, bool *mmap_changing)
{
-        return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
+        return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
+                              mmap_changing, 0);
}

+ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
+                       unsigned long len, bool *mmap_changing)
+{
+        return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
+                              mmap_changing, 0);
+}
+
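Note on the new entry point: mcopy_continue() passes src_start as 0, just as mfill_zeropage() does, because MCOPY_ATOMIC_CONTINUE reads no source memory; the destination pages are expected to be present in the page cache already, so only page table entries need to be installed. For VMAs that are not hugetlb-backed, __mcopy_atomic() now bails out early on this mode (the added goto out_unlock above), so at this revision only hugetlbfs registrations can service it.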
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                        unsigned long len, bool enable_wp, bool *mmap_changing)
{
        struct vm_area_struct *dst_vma;
        pgprot_t newprot;
        int err;

        /*

--- 45 unchanged lines hidden ---
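For illustration, a minimal userspace sketch of how the new kernel path is typically driven when resolving a minor fault. This is a hedged sketch, not code from this tree: resolve_minor_fault() is a hypothetical helper, and it assumes uffd is a userfaultfd file descriptor registered for minor-fault handling on a hugetlbfs mapping whose backing contents have already been prepared through a second mapping of the same file.

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Hypothetical helper: ask the kernel to install page table entries for
 * a page that is already present in the page cache. In this tree the
 * ioctl reaches mcopy_continue() -> __mcopy_atomic(...,
 * MCOPY_ATOMIC_CONTINUE, ...).
 */
static int resolve_minor_fault(int uffd, unsigned long fault_addr,
                               unsigned long huge_page_size)
{
        struct uffdio_continue cont = {
                .range = {
                        .start = fault_addr & ~(huge_page_size - 1),
                        .len   = huge_page_size,
                },
                .mode = 0,      /* 0 => wake the faulting thread on success */
        };

        if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
                return -1;      /* errno holds the failure, e.g. EINVAL for
                                 * a non-hugetlb VMA at this revision */
        return 0;
}

On success, cont.mapped reports how many bytes were mapped; passing UFFDIO_CONTINUE_MODE_DONTWAKE in .mode defers waking the faulting thread so several ranges can be resolved before a single explicit wake.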