// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/* Check if dst_addr is outside of file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{
	struct inode *inode;
	pgoff_t offset, max_off;

	if (!dst_vma->vm_file)
		return false;

	inode = dst_vma->vm_file->f_inode;
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}
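
/*
 * Worked example (editorial note, not part of the original source): for
 * a 5000-byte file on a system with 4096-byte pages, i_size_read()
 * returns 5000 and max_off = DIV_ROUND_UP(5000, 4096) = 2.  Any
 * dst_addr whose linear page index is >= 2 therefore lies past EOF and
 * the mfill operation fails with -EFAULT, matching what the regular
 * fault path would do for the same offset.
 */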

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider the case where both
	 * MISSING|WP are registered; we first wr-protect a none pte which
	 * has no page cache page backing it, then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
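
/*
 * Editorial note, derived from the checks above: the installed PTE is
 * always dirty, and it is writable iff
 *
 *	(vm_flags & VM_WRITE) && !(page_in_cache && !(vm_flags & VM_SHARED))
 *
 * i.e. a private mapping of a page cache page is always mapped
 * read-only, so a later write faults and COWs the page.  With
 * MFILL_ATOMIC_WP the PTE is additionally marked uffd-wp, so the first
 * write is reported to userspace instead of proceeding directly.
 */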

static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr, false);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here.  Although the mmap_lock
		 * can be taken for read recursively, a deadlock is still
		 * possible if a writer has taken the lock.  For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}

static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
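
/*
 * Example (illustrative userspace sketch, not part of this file): the
 * ioctl that reaches mfill_atomic_pte_zeropage() through
 * mfill_atomic_zeropage().  Assumes "uffd" is a userfaultfd file
 * descriptor whose destination range is registered in MISSING mode and
 * "dst" is page-aligned; error handling elided.
 *
 *	struct uffdio_zeropage zp = {
 *		.range = { .start = dst, .len = page_size },
 *		.mode  = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp))
 *		perror("UFFDIO_ZEROPAGE");
 *	// on success zp.zeropage holds the number of bytes zeroed
 */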

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find the folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
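
/*
 * Example (illustrative userspace sketch, not part of this file):
 * resolving a minor fault with the ioctl that reaches
 * mfill_atomic_pte_continue().  Assumes the shmem-backed range was
 * registered with UFFDIO_REGISTER_MODE_MINOR and the page cache was
 * already populated, e.g. through a second mapping of the same file;
 * error handling elided.
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len   = page_size },
 *		.mode  = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_CONTINUE, &cont))
 *		perror("UFFDIO_CONTINUE");
 */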

/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;

	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
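
/*
 * Example (illustrative userspace sketch, not part of this file):
 * installing a poison marker with the ioctl that reaches
 * mfill_atomic_pte_poison().  Subsequent accesses to the range get
 * SIGBUS, as if the page had been hardware-poisoned.  Assumes a
 * registered uffd range; error handling elided.
 *
 *	struct uffdio_poison poison = {
 *		.range = { .start = addr, .len = page_size },
 *		.mode  = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_POISON, &poison))
 *		perror("UFFDIO_POISON");
 */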

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we are not necessarily running this because the pmd
	 * was missing; *pmd may already be established, and in turn it
	 * may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      atomic_t *mmap_changing,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist, as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  The fault mutex
		 * prevents races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);
			/*
			 * If memory mappings are changing because of a
			 * non-cooperative operation (e.g. mremap) running in
			 * parallel, bail out and request the user to retry
			 * later.
			 */
			if (mmap_changing && atomic_read(mmap_changing)) {
				err = -EAGAIN;
				break;
			}

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
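
/*
 * Example (illustrative userspace sketch, not part of this file): for a
 * hugetlb destination, dst_start and len must both be multiples of the
 * VMA's huge page size or mfill_atomic_hugetlb() fails with -EINVAL.
 * A minimal sketch assuming a 2 MiB hugetlb mapping registered in
 * MISSING mode; error handling elided.
 *
 *	size_t hpagesize = 2UL << 20;	// must match the VMA's page size
 *	struct uffdio_copy copy = {
 *		.dst  = (unsigned long) dst,	// hpagesize-aligned
 *		.src  = (unsigned long) src,
 *		.len  = hpagesize,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy))
 *		perror("UFFDIO_COPY");
 */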
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    atomic_t *mmap_changing,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
					       dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
						 dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}

static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine.
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
					    len, mmap_changing, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma, or this page would get
	 * a NULL anon_vma when mapped into the dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP, don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  atomic_t *mmap_changing, uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}
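
/*
 * Example (illustrative userspace sketch, not part of this file): the
 * typical sequence that ends up in mfill_atomic_copy().  Assumes "uffd"
 * was created with the userfaultfd(2) syscall and UFFDIO_API has been
 * negotiated; error handling elided.
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long) area, .len = area_len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	// later, typically from the fault-handling thread:
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(page_size - 1),
 *		.src  = (unsigned long) src_page,
 *		.len  = page_size,
 *		.mode = 0,	// or UFFDIO_COPY_MODE_WP
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) && errno != EEXIST)
 *		perror("UFFDIO_COPY");
 *	// on success copy.copy holds the number of bytes copied
 */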

ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing,
			      uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}

ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
			    unsigned long len, atomic_t *mmap_changing,
			    uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
}

long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected by default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}

int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failure */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}
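
/*
 * Example (illustrative userspace sketch, not part of this file):
 * toggling write protection through the ioctl that reaches
 * mwriteprotect_range().  Assumes the range was registered with
 * UFFDIO_REGISTER_MODE_WP; error handling elided.
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = addr, .len = len },
 *		.mode  = UFFDIO_WRITEPROTECT_MODE_WP,	// 0 resolves WP
 *	};
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
 *		perror("UFFDIO_WRITEPROTECT");
 */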
856