xref: /openbmc/linux/mm/userfaultfd.c (revision 240e6d25)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install PTEs to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;

	if (page_in_cache)
		page_add_file_rmap(page, false);
	else
		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	if (newly_allocated)
		lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

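/*
 * Handle an MCOPY_ATOMIC_NORMAL request for a private (anonymous)
 * destination: allocate a fresh page, copy the source contents into it
 * while still holding mmap_lock, and install the PTE.  If the atomic
 * copy from userspace fails, return -ENOENT with the allocated page
 * stashed in *pagep so the caller can redo the copy outside mmap_lock
 * and retry.
 */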
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}

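/*
 * Handle an MCOPY_ATOMIC_ZEROPAGE request for a private destination by
 * installing a pte_special() mapping of the zero page at dst_addr.
 */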
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;
	if (!page) {
		ret = -EFAULT;
		goto out;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}

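/*
 * Walk the destination page tables down to the PMD level for @address,
 * allocating any missing intermediate levels.  Returns NULL on
 * allocation failure.
 */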
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is called regardless of whether the pmd is
	 * missing: the returned *pmd may already be established and
	 * may even be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
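			/*
			 * hugetlb_mcopy_atomic_pte() could not complete the
			 * copy from userspace with page faults disabled:
			 * drop mmap_lock, redo the copy with page faults
			 * enabled, then retry with a fresh vma lookup.
			 */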
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

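/*
 * Fill a single destination page, dispatching to the backend that
 * matches the request: mcontinue_atomic_pte() for MCOPY_ATOMIC_CONTINUE,
 * mcopy_atomic_pte()/mfill_zeropage_pte() for private destinations, and
 * shmem_mfill_atomic_pte() for shared shmem destinations.
 */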
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem mapping will invoke
	 * the fault, fill the hole in the file and COW it right away.
	 * The result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     page);
	}

	return err;
}

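/*
 * Common implementation behind mcopy_atomic(), mfill_zeropage() and
 * mcopy_continue(): validate the destination range, then fill it one
 * page at a time, releasing mmap_lock whenever the source data has to
 * be copied with page faults enabled.  Returns the number of bytes
 * filled, or a negative error if nothing was filled.
 */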
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the dst range is both valid and fully within a
	 * single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine.
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma, or this page would get
	 * a NULL anon_vma when installed in the dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP, don't
		 * override it; just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
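			/*
			 * The atomic copy inside mcopy_atomic_pte() failed
			 * (e.g. the source page was not resident): drop
			 * mmap_lock, redo the copy with page faults enabled
			 * and retry the operation.
			 */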
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

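/*
 * Implements UFFDIO_COPY: fill [dst_start, dst_start + len) with data
 * copied from src_start.
 */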
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

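/* Implements UFFDIO_ZEROPAGE: zero-fill [start, start + len). */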
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

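/*
 * Implements UFFDIO_CONTINUE: install PTEs for the page cache pages
 * already present at [start, start + len).
 */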
ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

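/*
 * Implements UFFDIO_WRITEPROTECT: set or clear uffd-wp protection on an
 * anonymous, uffd-wp registered range via change_protection().
 */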
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}