xref: /openbmc/linux/mm/memory.c (revision b96a3e9142fdf346b05b20e867b4f0dfca119e96)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/coredump.h>
47 #include <linux/sched/numa_balancing.h>
48 #include <linux/sched/task.h>
49 #include <linux/hugetlb.h>
50 #include <linux/mman.h>
51 #include <linux/swap.h>
52 #include <linux/highmem.h>
53 #include <linux/pagemap.h>
54 #include <linux/memremap.h>
55 #include <linux/kmsan.h>
56 #include <linux/ksm.h>
57 #include <linux/rmap.h>
58 #include <linux/export.h>
59 #include <linux/delayacct.h>
60 #include <linux/init.h>
61 #include <linux/pfn_t.h>
62 #include <linux/writeback.h>
63 #include <linux/memcontrol.h>
64 #include <linux/mmu_notifier.h>
65 #include <linux/swapops.h>
66 #include <linux/elf.h>
67 #include <linux/gfp.h>
68 #include <linux/migrate.h>
69 #include <linux/string.h>
70 #include <linux/memory-tiers.h>
71 #include <linux/debugfs.h>
72 #include <linux/userfaultfd_k.h>
73 #include <linux/dax.h>
74 #include <linux/oom.h>
75 #include <linux/numa.h>
76 #include <linux/perf_event.h>
77 #include <linux/ptrace.h>
78 #include <linux/vmalloc.h>
79 #include <linux/sched/sysctl.h>
80 
81 #include <trace/events/kmem.h>
82 
83 #include <asm/io.h>
84 #include <asm/mmu_context.h>
85 #include <asm/pgalloc.h>
86 #include <linux/uaccess.h>
87 #include <asm/tlb.h>
88 #include <asm/tlbflush.h>
89 
90 #include "pgalloc-track.h"
91 #include "internal.h"
92 #include "swap.h"
93 
94 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
95 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
96 #endif
97 
98 #ifndef CONFIG_NUMA
99 unsigned long max_mapnr;
100 EXPORT_SYMBOL(max_mapnr);
101 
102 struct page *mem_map;
103 EXPORT_SYMBOL(mem_map);
104 #endif
105 
106 static vm_fault_t do_fault(struct vm_fault *vmf);
107 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
108 static bool vmf_pte_changed(struct vm_fault *vmf);
109 
110 /*
111  * Return true if the original pte was a uffd-wp pte marker (so the pte was
112  * wr-protected).
113  */
114 static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
115 {
116 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
117 		return false;
118 
119 	return pte_marker_uffd_wp(vmf->orig_pte);
120 }
121 
122 /*
123  * A number of key systems in x86 including ioremap() rely on the assumption
124  * that high_memory defines the upper bound on direct map memory, the end
125  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
126  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
127  * and ZONE_HIGHMEM.
128  */
129 void *high_memory;
130 EXPORT_SYMBOL(high_memory);
131 
132 /*
133  * Randomize the address space (stacks, mmaps, brk, etc.).
134  *
135  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
136  *   as ancient (libc5 based) binaries can segfault. )
137  */
138 int randomize_va_space __read_mostly =
139 #ifdef CONFIG_COMPAT_BRK
140 					1;
141 #else
142 					2;
143 #endif
144 
145 #ifndef arch_wants_old_prefaulted_pte
146 static inline bool arch_wants_old_prefaulted_pte(void)
147 {
148 	/*
149 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
150 	 * some architectures, even if it's performed in hardware. By
151 	 * default, "false" means prefaulted entries will be 'young'.
152 	 */
153 	return false;
154 }
155 #endif
156 
157 static int __init disable_randmaps(char *s)
158 {
159 	randomize_va_space = 0;
160 	return 1;
161 }
162 __setup("norandmaps", disable_randmaps);
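
/*
 * The same knob is exposed at runtime as the kernel.randomize_va_space
 * sysctl (registered elsewhere), so for example
 *
 *	echo 0 > /proc/sys/kernel/randomize_va_space
 *
 * is broadly equivalent to booting with "norandmaps" (it affects
 * processes created from then on).
 */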
163 
164 unsigned long zero_pfn __read_mostly;
165 EXPORT_SYMBOL(zero_pfn);
166 
167 unsigned long highest_memmap_pfn __read_mostly;
168 
169 /*
170  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
171  */
172 static int __init init_zero_pfn(void)
173 {
174 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
175 	return 0;
176 }
177 early_initcall(init_zero_pfn);
178 
179 void mm_trace_rss_stat(struct mm_struct *mm, int member)
180 {
181 	trace_rss_stat(mm, member);
182 }
183 
184 /*
185  * Note: this doesn't free the actual pages themselves. That
186  * has been handled earlier when unmapping all the memory regions.
187  */
188 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
189 			   unsigned long addr)
190 {
191 	pgtable_t token = pmd_pgtable(*pmd);
192 	pmd_clear(pmd);
193 	pte_free_tlb(tlb, token, addr);
194 	mm_dec_nr_ptes(tlb->mm);
195 }
196 
197 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
198 				unsigned long addr, unsigned long end,
199 				unsigned long floor, unsigned long ceiling)
200 {
201 	pmd_t *pmd;
202 	unsigned long next;
203 	unsigned long start;
204 
205 	start = addr;
206 	pmd = pmd_offset(pud, addr);
207 	do {
208 		next = pmd_addr_end(addr, end);
209 		if (pmd_none_or_clear_bad(pmd))
210 			continue;
211 		free_pte_range(tlb, pmd, addr);
212 	} while (pmd++, addr = next, addr != end);
213 
214 	start &= PUD_MASK;
215 	if (start < floor)
216 		return;
217 	if (ceiling) {
218 		ceiling &= PUD_MASK;
219 		if (!ceiling)
220 			return;
221 	}
222 	if (end - 1 > ceiling - 1)
223 		return;
224 
225 	pmd = pmd_offset(pud, start);
226 	pud_clear(pud);
227 	pmd_free_tlb(tlb, pmd, start);
228 	mm_dec_nr_pmds(tlb->mm);
229 }
230 
231 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
232 				unsigned long addr, unsigned long end,
233 				unsigned long floor, unsigned long ceiling)
234 {
235 	pud_t *pud;
236 	unsigned long next;
237 	unsigned long start;
238 
239 	start = addr;
240 	pud = pud_offset(p4d, addr);
241 	do {
242 		next = pud_addr_end(addr, end);
243 		if (pud_none_or_clear_bad(pud))
244 			continue;
245 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
246 	} while (pud++, addr = next, addr != end);
247 
248 	start &= P4D_MASK;
249 	if (start < floor)
250 		return;
251 	if (ceiling) {
252 		ceiling &= P4D_MASK;
253 		if (!ceiling)
254 			return;
255 	}
256 	if (end - 1 > ceiling - 1)
257 		return;
258 
259 	pud = pud_offset(p4d, start);
260 	p4d_clear(p4d);
261 	pud_free_tlb(tlb, pud, start);
262 	mm_dec_nr_puds(tlb->mm);
263 }
264 
265 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
266 				unsigned long addr, unsigned long end,
267 				unsigned long floor, unsigned long ceiling)
268 {
269 	p4d_t *p4d;
270 	unsigned long next;
271 	unsigned long start;
272 
273 	start = addr;
274 	p4d = p4d_offset(pgd, addr);
275 	do {
276 		next = p4d_addr_end(addr, end);
277 		if (p4d_none_or_clear_bad(p4d))
278 			continue;
279 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
280 	} while (p4d++, addr = next, addr != end);
281 
282 	start &= PGDIR_MASK;
283 	if (start < floor)
284 		return;
285 	if (ceiling) {
286 		ceiling &= PGDIR_MASK;
287 		if (!ceiling)
288 			return;
289 	}
290 	if (end - 1 > ceiling - 1)
291 		return;
292 
293 	p4d = p4d_offset(pgd, start);
294 	pgd_clear(pgd);
295 	p4d_free_tlb(tlb, p4d, start);
296 }
297 
298 /*
299  * This function frees user-level page tables of a process.
300  */
301 void free_pgd_range(struct mmu_gather *tlb,
302 			unsigned long addr, unsigned long end,
303 			unsigned long floor, unsigned long ceiling)
304 {
305 	pgd_t *pgd;
306 	unsigned long next;
307 
308 	/*
309 	 * The next few lines have given us lots of grief...
310 	 *
311 	 * Why are we testing PMD* at this top level?  Because often
312 	 * there will be no work to do at all, and we'd prefer not to
313 	 * go all the way down to the bottom just to discover that.
314 	 *
315 	 * Why all these "- 1"s?  Because 0 represents both the bottom
316 	 * of the address space and the top of it (using -1 for the
317 	 * top wouldn't help much: the masks would do the wrong thing).
318 	 * The rule is that addr 0 and floor 0 refer to the bottom of
319 	 * the address space, but end 0 and ceiling 0 refer to the top
320 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
321 	 * that end 0 case should be mythical).
322 	 *
323 	 * Wherever addr is brought up or ceiling brought down, we must
324 	 * be careful to reject "the opposite 0" before it confuses the
325 	 * subsequent tests.  But what about where end is brought down
326  * by PMD_SIZE below? No, end can't go down to 0 there.
327 	 *
328 	 * Whereas we round start (addr) and ceiling down, by different
329 	 * masks at different levels, in order to test whether a table
330 	 * now has no other vmas using it, so can be freed, we don't
331 	 * bother to round floor or end up - the tests don't need that.
332 	 */
333 
334 	addr &= PMD_MASK;
335 	if (addr < floor) {
336 		addr += PMD_SIZE;
337 		if (!addr)
338 			return;
339 	}
340 	if (ceiling) {
341 		ceiling &= PMD_MASK;
342 		if (!ceiling)
343 			return;
344 	}
345 	if (end - 1 > ceiling - 1)
346 		end -= PMD_SIZE;
347 	if (addr > end - 1)
348 		return;
349 	/*
350 	 * We add page table cache pages with PAGE_SIZE
351 	 * (see pte_free_tlb()), so flush the tlb if we need to.
352 	 */
353 	tlb_change_page_size(tlb, PAGE_SIZE);
354 	pgd = pgd_offset(tlb->mm, addr);
355 	do {
356 		next = pgd_addr_end(addr, end);
357 		if (pgd_none_or_clear_bad(pgd))
358 			continue;
359 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
360 	} while (pgd++, addr = next, addr != end);
361 }
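
/*
 * A worked trace of the floor/ceiling arithmetic above (illustrative,
 * assuming PMD_SIZE == 2MiB == 0x200000 as on x86-64):
 *
 *	addr = 0x1ff000, floor = 0x200000:
 *	  addr &= PMD_MASK	-> 0x0
 *	  0x0 < floor		-> addr += PMD_SIZE -> 0x200000
 *	so the pte table straddling floor is left alone.
 *
 *	ceiling = 0x100000:
 *	  ceiling &= PMD_MASK	-> 0x0, "the opposite 0"
 *	  -> return, as treating 0 as "top of address space" here would
 *	     free tables that are still in use below the real ceiling.
 *
 * A genuine ceiling of 0 always means the top: "end - 1 > ceiling - 1"
 * then compares against ULONG_MAX and never pulls end down.
 */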
362 
363 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
364 		   struct vm_area_struct *vma, unsigned long floor,
365 		   unsigned long ceiling, bool mm_wr_locked)
366 {
367 	do {
368 		unsigned long addr = vma->vm_start;
369 		struct vm_area_struct *next;
370 
371 		/*
372 		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
373 		 * be 0.  This will underflow and is okay.
374 		 */
375 		next = mas_find(mas, ceiling - 1);
376 
377 		/*
378 		 * Hide vma from rmap and truncate_pagecache before freeing
379 		 * pgtables
380 		 */
381 		if (mm_wr_locked)
382 			vma_start_write(vma);
383 		unlink_anon_vmas(vma);
384 		unlink_file_vma(vma);
385 
386 		if (is_vm_hugetlb_page(vma)) {
387 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
388 				floor, next ? next->vm_start : ceiling);
389 		} else {
390 			/*
391 			 * Optimization: gather nearby vmas into one call down
392 			 */
393 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
394 			       && !is_vm_hugetlb_page(next)) {
395 				vma = next;
396 				next = mas_find(mas, ceiling - 1);
397 				if (mm_wr_locked)
398 					vma_start_write(vma);
399 				unlink_anon_vmas(vma);
400 				unlink_file_vma(vma);
401 			}
402 			free_pgd_range(tlb, addr, vma->vm_end,
403 				floor, next ? next->vm_start : ceiling);
404 		}
405 		vma = next;
406 	} while (vma);
407 }
408 
409 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
410 {
411 	spinlock_t *ptl = pmd_lock(mm, pmd);
412 
413 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
414 		mm_inc_nr_ptes(mm);
415 		/*
416 		 * Ensure all pte setup (eg. pte page lock and page clearing) are
417 		 * visible before the pte is made visible to other CPUs by being
418 		 * put into page tables.
419 		 *
420 		 * The other side of the story is the pointer chasing in the page
421 		 * table walking code (when walking the page table without locking;
422 		 * ie. most of the time). Fortunately, these data accesses consist
423 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
424 		 * being the notable exception) will already guarantee loads are
425 		 * seen in-order. See the alpha page table accessors for the
426 		 * smp_rmb() barriers in page table walking code.
427 		 */
428 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
429 		pmd_populate(mm, pmd, *pte);
430 		*pte = NULL;
431 	}
432 	spin_unlock(ptl);
433 }
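
/*
 * A sketch (not from this file) of the lockless read side that the
 * smp_wmb() above pairs with: walkers load the pmd once, then reach the
 * pte page only through address-dependent loads, e.g.:
 *
 *	pmd_t pmdval = pmdp_get_lockless(pmd);
 *
 *	if (pmd_none(pmdval) || !pmd_present(pmdval))
 *		return NULL;
 *	pte = pte_offset_map(pmd, addr);	// dependent load of pte page
 *
 * On alpha the dependent load needs an explicit smp_rmb(); on all other
 * architectures the address dependency orders the loads for free.
 */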
434 
435 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
436 {
437 	pgtable_t new = pte_alloc_one(mm);
438 	if (!new)
439 		return -ENOMEM;
440 
441 	pmd_install(mm, pmd, &new);
442 	if (new)
443 		pte_free(mm, new);
444 	return 0;
445 }
446 
447 int __pte_alloc_kernel(pmd_t *pmd)
448 {
449 	pte_t *new = pte_alloc_one_kernel(&init_mm);
450 	if (!new)
451 		return -ENOMEM;
452 
453 	spin_lock(&init_mm.page_table_lock);
454 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
455 		smp_wmb(); /* See comment in pmd_install() */
456 		pmd_populate_kernel(&init_mm, pmd, new);
457 		new = NULL;
458 	}
459 	spin_unlock(&init_mm.page_table_lock);
460 	if (new)
461 		pte_free_kernel(&init_mm, new);
462 	return 0;
463 }
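
/*
 * Callers normally reach the two helpers above through the pte_alloc()
 * and pte_alloc_kernel() wrappers, which only take the allocation path
 * while the pmd is still empty. A sketch of the kernel-side pattern
 * (cf. the vmalloc page-table population code):
 *
 *	pte = pte_alloc_kernel(pmd, addr);
 *	if (!pte)
 *		return -ENOMEM;
 *	// pmd is now populated; pte points at the entry for addr
 */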
464 
465 static inline void init_rss_vec(int *rss)
466 {
467 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
468 }
469 
470 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
471 {
472 	int i;
473 
474 	if (current->mm == mm)
475 		sync_mm_rss(mm);
476 	for (i = 0; i < NR_MM_COUNTERS; i++)
477 		if (rss[i])
478 			add_mm_counter(mm, i, rss[i]);
479 }
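
/*
 * Typical use of the rss vector (as in copy_pte_range() and
 * zap_pte_range() below): batch per-counter deltas on the stack while
 * the pte lock is held, then fold them in with one add_mm_counter()
 * call per counter:
 *
 *	int rss[NR_MM_COUNTERS];
 *
 *	init_rss_vec(rss);
 *	...			// e.g. rss[MM_ANONPAGES]++ per pte copied
 *	add_mm_rss_vec(mm, rss);
 */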
480 
481 /*
482  * This function is called to print an error when a bad pte
483  * is found. For example, we might have a PFN-mapped pte in
484  * a region that doesn't allow it.
485  *
486  * The calling function must still handle the error.
487  */
488 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
489 			  pte_t pte, struct page *page)
490 {
491 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
492 	p4d_t *p4d = p4d_offset(pgd, addr);
493 	pud_t *pud = pud_offset(p4d, addr);
494 	pmd_t *pmd = pmd_offset(pud, addr);
495 	struct address_space *mapping;
496 	pgoff_t index;
497 	static unsigned long resume;
498 	static unsigned long nr_shown;
499 	static unsigned long nr_unshown;
500 
501 	/*
502 	 * Allow a burst of 60 reports, then keep quiet for that minute;
503 	 * or allow a steady drip of one report per second.
504 	 */
505 	if (nr_shown == 60) {
506 		if (time_before(jiffies, resume)) {
507 			nr_unshown++;
508 			return;
509 		}
510 		if (nr_unshown) {
511 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
512 				 nr_unshown);
513 			nr_unshown = 0;
514 		}
515 		nr_shown = 0;
516 	}
517 	if (nr_shown++ == 0)
518 		resume = jiffies + 60 * HZ;
519 
520 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
521 	index = linear_page_index(vma, addr);
522 
523 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
524 		 current->comm,
525 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
526 	if (page)
527 		dump_page(page, "bad pte");
528 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
529 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
530 	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
531 		 vma->vm_file,
532 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
533 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
534 		 mapping ? mapping->a_ops->read_folio : NULL);
535 	dump_stack();
536 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
537 }
538 
539 /*
540  * vm_normal_page -- This function gets the "struct page" associated with a pte.
541  *
542  * "Special" mappings do not wish to be associated with a "struct page" (either
543  * it doesn't exist, or it exists but they don't want to touch it). In this
544  * case, NULL is returned here. "Normal" mappings do have a struct page.
545  *
546  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
547  * pte bit, in which case this function is trivial. Secondly, an architecture
548  * may not have a spare pte bit, which requires a more complicated scheme,
549  * described below.
550  *
551  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
552  * special mapping (even if there are underlying and valid "struct pages").
553  * COWed pages of a VM_PFNMAP are always normal.
554  *
555  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
556  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
557  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
558  * mapping will always honor the rule
559  *
560  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
561  *
562  * And for normal mappings this is false.
563  *
564  * This restricts such mappings to be a linear translation from virtual address
565  * to pfn. To get around this restriction, we allow arbitrary mappings so long
566  * as the vma is not a COW mapping; in that case, we know that all ptes are
567  * special (because none can have been COWed).
568  *
569  *
570  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
571  *
572  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
573  * page" backing, however the difference is that _all_ pages with a struct
574  * page (that is, those where pfn_valid is true) are refcounted and considered
575  * normal pages by the VM. The disadvantage is that pages are refcounted
576  * (which can be slower and simply not an option for some PFNMAP users). The
577  * advantage is that we don't have to follow the strict linearity rule of
578  * PFNMAP mappings in order to support COWable mappings.
579  *
580  */
581 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
582 			    pte_t pte)
583 {
584 	unsigned long pfn = pte_pfn(pte);
585 
586 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
587 		if (likely(!pte_special(pte)))
588 			goto check_pfn;
589 		if (vma->vm_ops && vma->vm_ops->find_special_page)
590 			return vma->vm_ops->find_special_page(vma, addr);
591 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
592 			return NULL;
593 		if (is_zero_pfn(pfn))
594 			return NULL;
595 		/*
596 		 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
597 		 * and will have refcounts incremented on their struct pages
598 		 * when they are inserted into PTEs, thus they are safe to
599 		 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
600 		 * do not have refcounts. Example of legacy ZONE_DEVICE is
601 		 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
602 		 */
603 		if (pte_devmap(pte))
604 			return NULL;
605 
606 		print_bad_pte(vma, addr, pte, NULL);
607 		return NULL;
608 	}
609 
610 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
611 
612 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
613 		if (vma->vm_flags & VM_MIXEDMAP) {
614 			if (!pfn_valid(pfn))
615 				return NULL;
616 			goto out;
617 		} else {
618 			unsigned long off;
619 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
620 			if (pfn == vma->vm_pgoff + off)
621 				return NULL;
622 			if (!is_cow_mapping(vma->vm_flags))
623 				return NULL;
624 		}
625 	}
626 
627 	if (is_zero_pfn(pfn))
628 		return NULL;
629 
630 check_pfn:
631 	if (unlikely(pfn > highest_memmap_pfn)) {
632 		print_bad_pte(vma, addr, pte, NULL);
633 		return NULL;
634 	}
635 
636 	/*
637 	 * NOTE! We still have PageReserved() pages in the page tables.
638 	 * eg. VDSO mappings can cause them to exist.
639 	 */
640 out:
641 	return pfn_to_page(pfn);
642 }
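
/*
 * A typical caller pattern (a sketch, not from this file): resolve the
 * backing page under the pte lock and take a reference before use;
 * special mappings simply come back NULL and are skipped:
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return NULL;
 *	page = vm_normal_page(vma, addr, ptep_get(pte));
 *	if (page)
 *		get_page(page);
 *	pte_unmap_unlock(pte, ptl);
 */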
643 
644 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
645 			    pte_t pte)
646 {
647 	struct page *page = vm_normal_page(vma, addr, pte);
648 
649 	if (page)
650 		return page_folio(page);
651 	return NULL;
652 }
653 
654 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
655 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
656 				pmd_t pmd)
657 {
658 	unsigned long pfn = pmd_pfn(pmd);
659 
660 	/*
661 	 * There is no pmd_special() but there may be special pmds, e.g.
662 	 * in a direct-access (dax) mapping, so let's just replicate the
663 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
664 	 */
665 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
666 		if (vma->vm_flags & VM_MIXEDMAP) {
667 			if (!pfn_valid(pfn))
668 				return NULL;
669 			goto out;
670 		} else {
671 			unsigned long off;
672 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
673 			if (pfn == vma->vm_pgoff + off)
674 				return NULL;
675 			if (!is_cow_mapping(vma->vm_flags))
676 				return NULL;
677 		}
678 	}
679 
680 	if (pmd_devmap(pmd))
681 		return NULL;
682 	if (is_huge_zero_pmd(pmd))
683 		return NULL;
684 	if (unlikely(pfn > highest_memmap_pfn))
685 		return NULL;
686 
687 	/*
688 	 * NOTE! We still have PageReserved() pages in the page tables.
689 	 * eg. VDSO mappings can cause them to exist.
690 	 */
691 out:
692 	return pfn_to_page(pfn);
693 }
694 #endif
695 
696 static void restore_exclusive_pte(struct vm_area_struct *vma,
697 				  struct page *page, unsigned long address,
698 				  pte_t *ptep)
699 {
700 	pte_t orig_pte;
701 	pte_t pte;
702 	swp_entry_t entry;
703 
704 	orig_pte = ptep_get(ptep);
705 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
706 	if (pte_swp_soft_dirty(orig_pte))
707 		pte = pte_mksoft_dirty(pte);
708 
709 	entry = pte_to_swp_entry(orig_pte);
710 	if (pte_swp_uffd_wp(orig_pte))
711 		pte = pte_mkuffd_wp(pte);
712 	else if (is_writable_device_exclusive_entry(entry))
713 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
714 
715 	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
716 
717 	/*
718 	 * No need to take a page reference as one was already
719 	 * created when the swap entry was made.
720 	 */
721 	if (PageAnon(page))
722 		page_add_anon_rmap(page, vma, address, RMAP_NONE);
723 	else
724 		/*
725 		 * Currently device exclusive access only supports anonymous
726 		 * memory so the entry shouldn't point to a filebacked page.
727 		 */
728 		WARN_ON_ONCE(1);
729 
730 	set_pte_at(vma->vm_mm, address, ptep, pte);
731 
732 	/*
733 	 * No need to invalidate - it was non-present before. However
734 	 * secondary CPUs may have mappings that need invalidating.
735 	 */
736 	update_mmu_cache(vma, address, ptep);
737 }
738 
739 /*
740  * Tries to restore an exclusive pte if the page lock can be acquired without
741  * sleeping.
742  */
743 static int
744 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
745 			unsigned long addr)
746 {
747 	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
748 	struct page *page = pfn_swap_entry_to_page(entry);
749 
750 	if (trylock_page(page)) {
751 		restore_exclusive_pte(vma, page, addr, src_pte);
752 		unlock_page(page);
753 		return 0;
754 	}
755 
756 	return -EBUSY;
757 }
758 
759 /*
760  * Copy one vm_area from one task to the other. Assumes the page tables
761  * already present in the new task have been cleared in the whole range
762  * covered by this vma.
763  */
764 
765 static unsigned long
766 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
767 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
768 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
769 {
770 	unsigned long vm_flags = dst_vma->vm_flags;
771 	pte_t orig_pte = ptep_get(src_pte);
772 	pte_t pte = orig_pte;
773 	struct page *page;
774 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
775 
776 	if (likely(!non_swap_entry(entry))) {
777 		if (swap_duplicate(entry) < 0)
778 			return -EIO;
779 
780 		/* make sure dst_mm is on swapoff's mmlist. */
781 		if (unlikely(list_empty(&dst_mm->mmlist))) {
782 			spin_lock(&mmlist_lock);
783 			if (list_empty(&dst_mm->mmlist))
784 				list_add(&dst_mm->mmlist,
785 						&src_mm->mmlist);
786 			spin_unlock(&mmlist_lock);
787 		}
788 		/* Mark the swap entry as shared. */
789 		if (pte_swp_exclusive(orig_pte)) {
790 			pte = pte_swp_clear_exclusive(orig_pte);
791 			set_pte_at(src_mm, addr, src_pte, pte);
792 		}
793 		rss[MM_SWAPENTS]++;
794 	} else if (is_migration_entry(entry)) {
795 		page = pfn_swap_entry_to_page(entry);
796 
797 		rss[mm_counter(page)]++;
798 
799 		if (!is_readable_migration_entry(entry) &&
800 				is_cow_mapping(vm_flags)) {
801 			/*
802 			 * COW mappings require pages in both parent and child
803 			 * to be read-only. A previously exclusive entry is
804 			 * now shared.
805 			 */
806 			entry = make_readable_migration_entry(
807 							swp_offset(entry));
808 			pte = swp_entry_to_pte(entry);
809 			if (pte_swp_soft_dirty(orig_pte))
810 				pte = pte_swp_mksoft_dirty(pte);
811 			if (pte_swp_uffd_wp(orig_pte))
812 				pte = pte_swp_mkuffd_wp(pte);
813 			set_pte_at(src_mm, addr, src_pte, pte);
814 		}
815 	} else if (is_device_private_entry(entry)) {
816 		page = pfn_swap_entry_to_page(entry);
817 
818 		/*
819 		 * Update rss count even for unaddressable pages, as
820 		 * they should be treated just like normal pages in this
821 		 * respect.
822 		 *
823 		 * We will likely want to have some new rss counters
824 		 * for unaddressable pages, at some point. But for now
825 		 * keep things as they are.
826 		 */
827 		get_page(page);
828 		rss[mm_counter(page)]++;
829 		/* Cannot fail as these pages cannot get pinned. */
830 		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
831 
832 		/*
833 		 * We do not preserve soft-dirty information, because so
834 		 * far, checkpoint/restore is the only feature that
835 		 * requires that. And checkpoint/restore does not work
836 		 * when a device driver is involved (you cannot easily
837 		 * save and restore device driver state).
838 		 */
839 		if (is_writable_device_private_entry(entry) &&
840 		    is_cow_mapping(vm_flags)) {
841 			entry = make_readable_device_private_entry(
842 							swp_offset(entry));
843 			pte = swp_entry_to_pte(entry);
844 			if (pte_swp_uffd_wp(orig_pte))
845 				pte = pte_swp_mkuffd_wp(pte);
846 			set_pte_at(src_mm, addr, src_pte, pte);
847 		}
848 	} else if (is_device_exclusive_entry(entry)) {
849 		/*
850 		 * Make device exclusive entries present by restoring the
851 		 * original entry then copying as for a present pte. Device
852 		 * exclusive entries currently only support private writable
853 		 * (ie. COW) mappings.
854 		 */
855 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
856 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
857 			return -EBUSY;
858 		return -ENOENT;
859 	} else if (is_pte_marker_entry(entry)) {
860 		pte_marker marker = copy_pte_marker(entry, dst_vma);
861 
862 		if (marker)
863 			set_pte_at(dst_mm, addr, dst_pte,
864 				   make_pte_marker(marker));
865 		return 0;
866 	}
867 	if (!userfaultfd_wp(dst_vma))
868 		pte = pte_swp_clear_uffd_wp(pte);
869 	set_pte_at(dst_mm, addr, dst_pte, pte);
870 	return 0;
871 }
872 
873 /*
874  * Copy a present and normal page.
875  *
876  * NOTE! The usual case is that this isn't required;
877  * instead, the caller can just increase the page refcount
878  * and re-use the pte the traditional way.
879  *
880  * And if we need a pre-allocated page but don't yet have
881  * one, return a negative error to let the preallocation
882  * code know so that it can do so outside the page table
883  * lock.
884  */
885 static inline int
886 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
887 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
888 		  struct folio **prealloc, struct page *page)
889 {
890 	struct folio *new_folio;
891 	pte_t pte;
892 
893 	new_folio = *prealloc;
894 	if (!new_folio)
895 		return -EAGAIN;
896 
897 	/*
898 	 * We have a prealloc page, all good!  Take it
899 	 * over and copy the page & arm it.
900 	 */
901 	*prealloc = NULL;
902 	copy_user_highpage(&new_folio->page, page, addr, src_vma);
903 	__folio_mark_uptodate(new_folio);
904 	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
905 	folio_add_lru_vma(new_folio, dst_vma);
906 	rss[MM_ANONPAGES]++;
907 
908 	/* All done, just insert the new page copy in the child */
909 	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
910 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
911 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
912 		/* Uffd-wp needs to be delivered to dest pte as well */
913 		pte = pte_mkuffd_wp(pte);
914 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
915 	return 0;
916 }
917 
918 /*
919  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
920  * is required to copy this pte.
921  */
922 static inline int
923 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
924 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
925 		 struct folio **prealloc)
926 {
927 	struct mm_struct *src_mm = src_vma->vm_mm;
928 	unsigned long vm_flags = src_vma->vm_flags;
929 	pte_t pte = ptep_get(src_pte);
930 	struct page *page;
931 	struct folio *folio;
932 
933 	page = vm_normal_page(src_vma, addr, pte);
934 	if (page)
935 		folio = page_folio(page);
936 	if (page && folio_test_anon(folio)) {
937 		/*
938 		 * If this page may have been pinned by the parent process,
939 		 * copy the page immediately for the child so that we'll always
940 		 * guarantee the pinned page won't be randomly replaced in the
941 		 * future.
942 		 */
943 		folio_get(folio);
944 		if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
945 			/* Page may be pinned, we have to copy. */
946 			folio_put(folio);
947 			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
948 						 addr, rss, prealloc, page);
949 		}
950 		rss[MM_ANONPAGES]++;
951 	} else if (page) {
952 		folio_get(folio);
953 		page_dup_file_rmap(page, false);
954 		rss[mm_counter_file(page)]++;
955 	}
956 
957 	/*
958 	 * If it's a COW mapping, write protect it both
959 	 * in the parent and the child
960 	 */
961 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
962 		ptep_set_wrprotect(src_mm, addr, src_pte);
963 		pte = pte_wrprotect(pte);
964 	}
965 	VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
966 
967 	/*
968 	 * If it's a shared mapping, mark it clean in
969 	 * the child
970 	 */
971 	if (vm_flags & VM_SHARED)
972 		pte = pte_mkclean(pte);
973 	pte = pte_mkold(pte);
974 
975 	if (!userfaultfd_wp(dst_vma))
976 		pte = pte_clear_uffd_wp(pte);
977 
978 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
979 	return 0;
980 }
981 
982 static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
983 		struct vm_area_struct *vma, unsigned long addr)
984 {
985 	struct folio *new_folio;
986 
987 	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
988 	if (!new_folio)
989 		return NULL;
990 
991 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
992 		folio_put(new_folio);
993 		return NULL;
994 	}
995 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
996 
997 	return new_folio;
998 }
999 
1000 static int
1001 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1002 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1003 	       unsigned long end)
1004 {
1005 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1006 	struct mm_struct *src_mm = src_vma->vm_mm;
1007 	pte_t *orig_src_pte, *orig_dst_pte;
1008 	pte_t *src_pte, *dst_pte;
1009 	pte_t ptent;
1010 	spinlock_t *src_ptl, *dst_ptl;
1011 	int progress, ret = 0;
1012 	int rss[NR_MM_COUNTERS];
1013 	swp_entry_t entry = (swp_entry_t){0};
1014 	struct folio *prealloc = NULL;
1015 
1016 again:
1017 	progress = 0;
1018 	init_rss_vec(rss);
1019 
1020 	/*
1021 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1022 	 * error handling here, assume that exclusive mmap_lock on dst and src
1023 	 * protects anon from unexpected THP transitions; with shmem and file
1024 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1025 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1026 	 * can remove such assumptions later, but this is good enough for now.
1027 	 */
1028 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1029 	if (!dst_pte) {
1030 		ret = -ENOMEM;
1031 		goto out;
1032 	}
1033 	src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
1034 	if (!src_pte) {
1035 		pte_unmap_unlock(dst_pte, dst_ptl);
1036 		/* ret == 0 */
1037 		goto out;
1038 	}
1039 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1040 	orig_src_pte = src_pte;
1041 	orig_dst_pte = dst_pte;
1042 	arch_enter_lazy_mmu_mode();
1043 
1044 	do {
1045 		/*
1046 		 * We are holding two locks at this point - either of them
1047 		 * could generate latencies in another task on another CPU.
1048 		 */
1049 		if (progress >= 32) {
1050 			progress = 0;
1051 			if (need_resched() ||
1052 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1053 				break;
1054 		}
1055 		ptent = ptep_get(src_pte);
1056 		if (pte_none(ptent)) {
1057 			progress++;
1058 			continue;
1059 		}
1060 		if (unlikely(!pte_present(ptent))) {
1061 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1062 						  dst_pte, src_pte,
1063 						  dst_vma, src_vma,
1064 						  addr, rss);
1065 			if (ret == -EIO) {
1066 				entry = pte_to_swp_entry(ptep_get(src_pte));
1067 				break;
1068 			} else if (ret == -EBUSY) {
1069 				break;
1070 			} else if (!ret) {
1071 				progress += 8;
1072 				continue;
1073 			}
1074 
1075 			/*
1076 			 * Device exclusive entry restored, continue by copying
1077 			 * the now present pte.
1078 			 */
1079 			WARN_ON_ONCE(ret != -ENOENT);
1080 		}
1081 		/* copy_present_pte() will clear `*prealloc' if consumed */
1082 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1083 				       addr, rss, &prealloc);
1084 		/*
1085 		 * If we need a pre-allocated page for this pte, drop the
1086 		 * locks, allocate, and try again.
1087 		 */
1088 		if (unlikely(ret == -EAGAIN))
1089 			break;
1090 		if (unlikely(prealloc)) {
1091 			/*
1092 			 * pre-alloc page cannot be reused by next time so as
1093 			 * to strictly follow mempolicy (e.g., alloc_page_vma()
1094 			 * will allocate page according to address).  This
1095 			 * could only happen if one pinned pte changed.
1096 			 */
1097 			folio_put(prealloc);
1098 			prealloc = NULL;
1099 		}
1100 		progress += 8;
1101 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1102 
1103 	arch_leave_lazy_mmu_mode();
1104 	pte_unmap_unlock(orig_src_pte, src_ptl);
1105 	add_mm_rss_vec(dst_mm, rss);
1106 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1107 	cond_resched();
1108 
1109 	if (ret == -EIO) {
1110 		VM_WARN_ON_ONCE(!entry.val);
1111 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1112 			ret = -ENOMEM;
1113 			goto out;
1114 		}
1115 		entry.val = 0;
1116 	} else if (ret == -EBUSY) {
1117 		goto out;
1118 	} else if (ret == -EAGAIN) {
1119 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1120 		if (!prealloc)
1121 			return -ENOMEM;
1122 	} else if (ret) {
1123 		VM_WARN_ON_ONCE(1);
1124 	}
1125 
1126 	/* We've captured and resolved the error. Reset, try again. */
1127 	ret = 0;
1128 
1129 	if (addr != end)
1130 		goto again;
1131 out:
1132 	if (unlikely(prealloc))
1133 		folio_put(prealloc);
1134 	return ret;
1135 }
1136 
1137 static inline int
1138 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1139 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1140 	       unsigned long end)
1141 {
1142 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1143 	struct mm_struct *src_mm = src_vma->vm_mm;
1144 	pmd_t *src_pmd, *dst_pmd;
1145 	unsigned long next;
1146 
1147 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1148 	if (!dst_pmd)
1149 		return -ENOMEM;
1150 	src_pmd = pmd_offset(src_pud, addr);
1151 	do {
1152 		next = pmd_addr_end(addr, end);
1153 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1154 			|| pmd_devmap(*src_pmd)) {
1155 			int err;
1156 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1157 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1158 					    addr, dst_vma, src_vma);
1159 			if (err == -ENOMEM)
1160 				return -ENOMEM;
1161 			if (!err)
1162 				continue;
1163 			/* fall through */
1164 		}
1165 		if (pmd_none_or_clear_bad(src_pmd))
1166 			continue;
1167 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1168 				   addr, next))
1169 			return -ENOMEM;
1170 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1171 	return 0;
1172 }
1173 
1174 static inline int
1175 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1176 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1177 	       unsigned long end)
1178 {
1179 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1180 	struct mm_struct *src_mm = src_vma->vm_mm;
1181 	pud_t *src_pud, *dst_pud;
1182 	unsigned long next;
1183 
1184 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1185 	if (!dst_pud)
1186 		return -ENOMEM;
1187 	src_pud = pud_offset(src_p4d, addr);
1188 	do {
1189 		next = pud_addr_end(addr, end);
1190 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1191 			int err;
1192 
1193 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1194 			err = copy_huge_pud(dst_mm, src_mm,
1195 					    dst_pud, src_pud, addr, src_vma);
1196 			if (err == -ENOMEM)
1197 				return -ENOMEM;
1198 			if (!err)
1199 				continue;
1200 			/* fall through */
1201 		}
1202 		if (pud_none_or_clear_bad(src_pud))
1203 			continue;
1204 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1205 				   addr, next))
1206 			return -ENOMEM;
1207 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1208 	return 0;
1209 }
1210 
1211 static inline int
1212 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1213 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1214 	       unsigned long end)
1215 {
1216 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1217 	p4d_t *src_p4d, *dst_p4d;
1218 	unsigned long next;
1219 
1220 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1221 	if (!dst_p4d)
1222 		return -ENOMEM;
1223 	src_p4d = p4d_offset(src_pgd, addr);
1224 	do {
1225 		next = p4d_addr_end(addr, end);
1226 		if (p4d_none_or_clear_bad(src_p4d))
1227 			continue;
1228 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1229 				   addr, next))
1230 			return -ENOMEM;
1231 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1232 	return 0;
1233 }
1234 
1235 /*
1236  * Return true if the vma needs to copy the pgtable during this fork().  Return
1237  * false when we can speed up fork() by allowing lazy page faults later until
1238  * when the child accesses the memory range.
1239  */
1240 static bool
1241 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1242 {
1243 	/*
1244 	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1245 	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
1246 	 * contains uffd-wp protection information, that's something we can't
1247 	 * retrieve from page cache, and skip copying will lose those info.
1248 	 */
1249 	if (userfaultfd_wp(dst_vma))
1250 		return true;
1251 
1252 	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
1253 		return true;
1254 
1255 	if (src_vma->anon_vma)
1256 		return true;
1257 
1258 	/*
1259 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1260 	 * becomes much lighter when there are big shared or private readonly
1261 	 * mappings. The tradeoff is that copy_page_range is more efficient
1262 	 * than faulting.
1263 	 */
1264 	return false;
1265 }
1266 
1267 int
1268 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1269 {
1270 	pgd_t *src_pgd, *dst_pgd;
1271 	unsigned long next;
1272 	unsigned long addr = src_vma->vm_start;
1273 	unsigned long end = src_vma->vm_end;
1274 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1275 	struct mm_struct *src_mm = src_vma->vm_mm;
1276 	struct mmu_notifier_range range;
1277 	bool is_cow;
1278 	int ret;
1279 
1280 	if (!vma_needs_copy(dst_vma, src_vma))
1281 		return 0;
1282 
1283 	if (is_vm_hugetlb_page(src_vma))
1284 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1285 
1286 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1287 		/*
1288 		 * We do not free on error cases below as remove_vma
1289 		 * gets called on error from higher level routine
1290 		 */
1291 		ret = track_pfn_copy(src_vma);
1292 		if (ret)
1293 			return ret;
1294 	}
1295 
1296 	/*
1297 	 * We need to invalidate the secondary MMU mappings only when
1298 	 * there could be a permission downgrade on the ptes of the
1299 	 * parent mm. And a permission downgrade will only happen if
1300 	 * is_cow_mapping() returns true.
1301 	 */
1302 	is_cow = is_cow_mapping(src_vma->vm_flags);
1303 
1304 	if (is_cow) {
1305 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1306 					0, src_mm, addr, end);
1307 		mmu_notifier_invalidate_range_start(&range);
1308 		/*
1309 		 * Disabling preemption is not needed for the write side, as
1310 		 * the read side doesn't spin, but goes to the mmap_lock.
1311 		 *
1312 		 * Use the raw variant of the seqcount_t write API to avoid
1313 		 * lockdep complaining about preemptibility.
1314 		 */
1315 		vma_assert_write_locked(src_vma);
1316 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1317 	}
1318 
1319 	ret = 0;
1320 	dst_pgd = pgd_offset(dst_mm, addr);
1321 	src_pgd = pgd_offset(src_mm, addr);
1322 	do {
1323 		next = pgd_addr_end(addr, end);
1324 		if (pgd_none_or_clear_bad(src_pgd))
1325 			continue;
1326 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1327 					    addr, next))) {
1328 			untrack_pfn_clear(dst_vma);
1329 			ret = -ENOMEM;
1330 			break;
1331 		}
1332 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1333 
1334 	if (is_cow) {
1335 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1336 		mmu_notifier_invalidate_range_end(&range);
1337 	}
1338 	return ret;
1339 }
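
/*
 * The write_protect_seq taken above has a lockless reader in GUP-fast:
 * a pinning fast path samples the count and backs off if a COW fork is
 * write-protecting ptes underneath it (a sketch, cf.
 * internal_get_user_pages_fast(); unpin_and_retry() is hypothetical):
 *
 *	seq = raw_read_seqcount(&mm->write_protect_seq);
 *	if (seq & 1)
 *		return -EAGAIN;		// fork in progress
 *	...				// walk and pin locklessly
 *	if (read_seqcount_retry(&mm->write_protect_seq, seq))
 *		unpin_and_retry();	// hypothetical helper
 */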
1340 
1341 /* Whether we should zap all COWed (private) pages too */
1342 static inline bool should_zap_cows(struct zap_details *details)
1343 {
1344 	/* By default, zap all pages */
1345 	if (!details)
1346 		return true;
1347 
1348 	/* Or, we zap COWed pages only if the caller wants to */
1349 	return details->even_cows;
1350 }
1351 
1352 /* Decides whether we should zap this page with the page pointer specified */
1353 static inline bool should_zap_page(struct zap_details *details, struct page *page)
1354 {
1355 	/* If we can make a decision without *page.. */
1356 	if (should_zap_cows(details))
1357 		return true;
1358 
1359 	/* E.g. the caller passes NULL for the case of a zero page */
1360 	if (!page)
1361 		return true;
1362 
1363 	/* Otherwise we should only zap non-anon pages */
1364 	return !PageAnon(page);
1365 }
1366 
1367 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
1368 {
1369 	if (!details)
1370 		return false;
1371 
1372 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1373 }
1374 
1375 /*
1376  * This function makes sure that we'll replace the none pte with an uffd-wp
1377  * swap special pte marker when necessary. Must be called with the pgtable lock held.
1378  */
1379 static inline void
1380 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1381 			      unsigned long addr, pte_t *pte,
1382 			      struct zap_details *details, pte_t pteval)
1383 {
1384 	/* Zap on anonymous always means dropping everything */
1385 	if (vma_is_anonymous(vma))
1386 		return;
1387 
1388 	if (zap_drop_file_uffd_wp(details))
1389 		return;
1390 
1391 	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
1392 }
1393 
1394 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1395 				struct vm_area_struct *vma, pmd_t *pmd,
1396 				unsigned long addr, unsigned long end,
1397 				struct zap_details *details)
1398 {
1399 	struct mm_struct *mm = tlb->mm;
1400 	int force_flush = 0;
1401 	int rss[NR_MM_COUNTERS];
1402 	spinlock_t *ptl;
1403 	pte_t *start_pte;
1404 	pte_t *pte;
1405 	swp_entry_t entry;
1406 
1407 	tlb_change_page_size(tlb, PAGE_SIZE);
1408 	init_rss_vec(rss);
1409 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1410 	if (!pte)
1411 		return addr;
1412 
1413 	flush_tlb_batched_pending(mm);
1414 	arch_enter_lazy_mmu_mode();
1415 	do {
1416 		pte_t ptent = ptep_get(pte);
1417 		struct page *page;
1418 
1419 		if (pte_none(ptent))
1420 			continue;
1421 
1422 		if (need_resched())
1423 			break;
1424 
1425 		if (pte_present(ptent)) {
1426 			unsigned int delay_rmap;
1427 
1428 			page = vm_normal_page(vma, addr, ptent);
1429 			if (unlikely(!should_zap_page(details, page)))
1430 				continue;
1431 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1432 							tlb->fullmm);
1433 			tlb_remove_tlb_entry(tlb, pte, addr);
1434 			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
1435 						      ptent);
1436 			if (unlikely(!page)) {
1437 				ksm_might_unmap_zero_page(mm, ptent);
1438 				continue;
1439 			}
1440 
1441 			delay_rmap = 0;
1442 			if (!PageAnon(page)) {
1443 				if (pte_dirty(ptent)) {
1444 					set_page_dirty(page);
1445 					if (tlb_delay_rmap(tlb)) {
1446 						delay_rmap = 1;
1447 						force_flush = 1;
1448 					}
1449 				}
1450 				if (pte_young(ptent) && likely(vma_has_recency(vma)))
1451 					mark_page_accessed(page);
1452 			}
1453 			rss[mm_counter(page)]--;
1454 			if (!delay_rmap) {
1455 				page_remove_rmap(page, vma, false);
1456 				if (unlikely(page_mapcount(page) < 0))
1457 					print_bad_pte(vma, addr, ptent, page);
1458 			}
1459 			if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
1460 				force_flush = 1;
1461 				addr += PAGE_SIZE;
1462 				break;
1463 			}
1464 			continue;
1465 		}
1466 
1467 		entry = pte_to_swp_entry(ptent);
1468 		if (is_device_private_entry(entry) ||
1469 		    is_device_exclusive_entry(entry)) {
1470 			page = pfn_swap_entry_to_page(entry);
1471 			if (unlikely(!should_zap_page(details, page)))
1472 				continue;
1473 			/*
1474 			 * Both device private/exclusive mappings should only
1475 			 * work with anonymous page so far, so we don't need to
1476 			 * consider uffd-wp bit when zap. For more information,
1477 			 * see zap_install_uffd_wp_if_needed().
1478 			 */
1479 			WARN_ON_ONCE(!vma_is_anonymous(vma));
1480 			rss[mm_counter(page)]--;
1481 			if (is_device_private_entry(entry))
1482 				page_remove_rmap(page, vma, false);
1483 			put_page(page);
1484 		} else if (!non_swap_entry(entry)) {
1485 			/* Genuine swap entry, hence a private anon page */
1486 			if (!should_zap_cows(details))
1487 				continue;
1488 			rss[MM_SWAPENTS]--;
1489 			if (unlikely(!free_swap_and_cache(entry)))
1490 				print_bad_pte(vma, addr, ptent, NULL);
1491 		} else if (is_migration_entry(entry)) {
1492 			page = pfn_swap_entry_to_page(entry);
1493 			if (!should_zap_page(details, page))
1494 				continue;
1495 			rss[mm_counter(page)]--;
1496 		} else if (pte_marker_entry_uffd_wp(entry)) {
1497 			/*
1498 			 * For anon: always drop the marker; for file: only
1499 			 * drop the marker if explicitly requested.
1500 			 */
1501 			if (!vma_is_anonymous(vma) &&
1502 			    !zap_drop_file_uffd_wp(details))
1503 				continue;
1504 		} else if (is_hwpoison_entry(entry) ||
1505 			   is_poisoned_swp_entry(entry)) {
1506 			if (!should_zap_cows(details))
1507 				continue;
1508 		} else {
1509 			/* We should have covered all the swap entry types */
1510 			WARN_ON_ONCE(1);
1511 		}
1512 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1513 		zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
1514 	} while (pte++, addr += PAGE_SIZE, addr != end);
1515 
1516 	add_mm_rss_vec(mm, rss);
1517 	arch_leave_lazy_mmu_mode();
1518 
1519 	/* Do the actual TLB flush before dropping ptl */
1520 	if (force_flush) {
1521 		tlb_flush_mmu_tlbonly(tlb);
1522 		tlb_flush_rmaps(tlb, vma);
1523 	}
1524 	pte_unmap_unlock(start_pte, ptl);
1525 
1526 	/*
1527 	 * If we forced a TLB flush (either due to running out of
1528 	 * batch buffers or because we needed to flush dirty TLB
1529 	 * entries before releasing the ptl), free the batched
1530 	 * memory too. Come back again if we didn't do everything.
1531 	 */
1532 	if (force_flush)
1533 		tlb_flush_mmu(tlb);
1534 
1535 	return addr;
1536 }
1537 
1538 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1539 				struct vm_area_struct *vma, pud_t *pud,
1540 				unsigned long addr, unsigned long end,
1541 				struct zap_details *details)
1542 {
1543 	pmd_t *pmd;
1544 	unsigned long next;
1545 
1546 	pmd = pmd_offset(pud, addr);
1547 	do {
1548 		next = pmd_addr_end(addr, end);
1549 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1550 			if (next - addr != HPAGE_PMD_SIZE)
1551 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1552 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1553 				addr = next;
1554 				continue;
1555 			}
1556 			/* fall through */
1557 		} else if (details && details->single_folio &&
1558 			   folio_test_pmd_mappable(details->single_folio) &&
1559 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1560 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1561 			/*
1562 			 * Take and drop THP pmd lock so that we cannot return
1563 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1564 			 * but not yet decremented compound_mapcount().
1565 			 */
1566 			spin_unlock(ptl);
1567 		}
1568 		if (pmd_none(*pmd)) {
1569 			addr = next;
1570 			continue;
1571 		}
1572 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1573 		if (addr != next)
1574 			pmd--;
1575 	} while (pmd++, cond_resched(), addr != end);
1576 
1577 	return addr;
1578 }
1579 
1580 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1581 				struct vm_area_struct *vma, p4d_t *p4d,
1582 				unsigned long addr, unsigned long end,
1583 				struct zap_details *details)
1584 {
1585 	pud_t *pud;
1586 	unsigned long next;
1587 
1588 	pud = pud_offset(p4d, addr);
1589 	do {
1590 		next = pud_addr_end(addr, end);
1591 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1592 			if (next - addr != HPAGE_PUD_SIZE) {
1593 				mmap_assert_locked(tlb->mm);
1594 				split_huge_pud(vma, pud, addr);
1595 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1596 				goto next;
1597 			/* fall through */
1598 		}
1599 		if (pud_none_or_clear_bad(pud))
1600 			continue;
1601 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1602 next:
1603 		cond_resched();
1604 	} while (pud++, addr = next, addr != end);
1605 
1606 	return addr;
1607 }
1608 
1609 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1610 				struct vm_area_struct *vma, pgd_t *pgd,
1611 				unsigned long addr, unsigned long end,
1612 				struct zap_details *details)
1613 {
1614 	p4d_t *p4d;
1615 	unsigned long next;
1616 
1617 	p4d = p4d_offset(pgd, addr);
1618 	do {
1619 		next = p4d_addr_end(addr, end);
1620 		if (p4d_none_or_clear_bad(p4d))
1621 			continue;
1622 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1623 	} while (p4d++, addr = next, addr != end);
1624 
1625 	return addr;
1626 }
1627 
1628 void unmap_page_range(struct mmu_gather *tlb,
1629 			     struct vm_area_struct *vma,
1630 			     unsigned long addr, unsigned long end,
1631 			     struct zap_details *details)
1632 {
1633 	pgd_t *pgd;
1634 	unsigned long next;
1635 
1636 	BUG_ON(addr >= end);
1637 	tlb_start_vma(tlb, vma);
1638 	pgd = pgd_offset(vma->vm_mm, addr);
1639 	do {
1640 		next = pgd_addr_end(addr, end);
1641 		if (pgd_none_or_clear_bad(pgd))
1642 			continue;
1643 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1644 	} while (pgd++, addr = next, addr != end);
1645 	tlb_end_vma(tlb, vma);
1646 }
1647 
1648 
1649 static void unmap_single_vma(struct mmu_gather *tlb,
1650 		struct vm_area_struct *vma, unsigned long start_addr,
1651 		unsigned long end_addr,
1652 		struct zap_details *details, bool mm_wr_locked)
1653 {
1654 	unsigned long start = max(vma->vm_start, start_addr);
1655 	unsigned long end;
1656 
1657 	if (start >= vma->vm_end)
1658 		return;
1659 	end = min(vma->vm_end, end_addr);
1660 	if (end <= vma->vm_start)
1661 		return;
1662 
1663 	if (vma->vm_file)
1664 		uprobe_munmap(vma, start, end);
1665 
1666 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1667 		untrack_pfn(vma, 0, 0, mm_wr_locked);
1668 
1669 	if (start != end) {
1670 		if (unlikely(is_vm_hugetlb_page(vma))) {
1671 			/*
1672 			 * It is undesirable to test vma->vm_file as it
1673 			 * should be non-null for a valid hugetlb area.
1674 			 * However, vm_file will be NULL in the error
1675 			 * cleanup path of mmap_region. When
1676 			 * hugetlbfs ->mmap method fails,
1677 			 * mmap_region() nullifies vma->vm_file
1678 			 * before calling this function to clean up.
1679 			 * Since no pte has actually been setup, it is
1680 			 * safe to do nothing in this case.
1681 			 */
1682 			if (vma->vm_file) {
1683 				zap_flags_t zap_flags = details ?
1684 				    details->zap_flags : 0;
1685 				__unmap_hugepage_range_final(tlb, vma, start, end,
1686 							     NULL, zap_flags);
1687 			}
1688 		} else
1689 			unmap_page_range(tlb, vma, start, end, details);
1690 	}
1691 }
1692 
1693 /**
1694  * unmap_vmas - unmap a range of memory covered by a list of vma's
1695  * @tlb: address of the caller's struct mmu_gather
1696  * @mas: the maple state
1697  * @vma: the starting vma
1698  * @start_addr: virtual address at which to start unmapping
1699  * @end_addr: virtual address at which to end unmapping
1700  * @tree_end: The maximum index to check
1701  * @mm_wr_locked: true if the mmap_lock is held for writing
1702  *
1703  * Unmap all pages in the vma list.
1704  *
1705  * Only addresses between `start_addr' and `end_addr' will be unmapped.
1706  *
1707  * The VMA list must be sorted in ascending virtual address order.
1708  *
1709  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1710  * range after unmap_vmas() returns.  So the only responsibility here is to
1711  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1712  * drops the lock and schedules.
1713  */
1714 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1715 		struct vm_area_struct *vma, unsigned long start_addr,
1716 		unsigned long end_addr, unsigned long tree_end,
1717 		bool mm_wr_locked)
1718 {
1719 	struct mmu_notifier_range range;
1720 	struct zap_details details = {
1721 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1722 		/* Careful - we need to zap private pages too! */
1723 		.even_cows = true,
1724 	};
1725 
1726 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1727 				start_addr, end_addr);
1728 	mmu_notifier_invalidate_range_start(&range);
1729 	do {
1730 		unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
1731 				 mm_wr_locked);
1732 	} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
1733 	mmu_notifier_invalidate_range_end(&range);
1734 }
1735 
1736 /**
1737  * zap_page_range_single - remove user pages in a given range
1738  * @vma: vm_area_struct holding the applicable pages
1739  * @address: starting address of pages to zap
1740  * @size: number of bytes to zap
1741  * @details: details of shared cache invalidation
1742  *
1743  * The range must fit into one VMA.
1744  */
1745 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1746 		unsigned long size, struct zap_details *details)
1747 {
1748 	const unsigned long end = address + size;
1749 	struct mmu_notifier_range range;
1750 	struct mmu_gather tlb;
1751 
1752 	lru_add_drain();
1753 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1754 				address, end);
1755 	if (is_vm_hugetlb_page(vma))
1756 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1757 						     &range.end);
1758 	tlb_gather_mmu(&tlb, vma->vm_mm);
1759 	update_hiwater_rss(vma->vm_mm);
1760 	mmu_notifier_invalidate_range_start(&range);
1761 	/*
1762 	 * unmap 'address-end' not 'range.start-range.end' as range
1763 	 * could have been expanded for hugetlb pmd sharing.
1764 	 */
1765 	unmap_single_vma(&tlb, vma, address, end, details, false);
1766 	mmu_notifier_invalidate_range_end(&range);
1767 	tlb_finish_mmu(&tlb);
1768 }
1769 
1770 /**
1771  * zap_vma_ptes - remove ptes mapping the vma
1772  * @vma: vm_area_struct holding ptes to be zapped
1773  * @address: starting address of pages to zap
1774  * @size: number of bytes to zap
1775  *
1776  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1777  *
1778  * The entire address range must be fully contained within the vma.
1779  *
1780  */
1781 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1782 		unsigned long size)
1783 {
1784 	if (!range_in_vma(vma, address, address + size) ||
1785 	    !(vma->vm_flags & VM_PFNMAP))
1786 		return;
1787 
1788 	zap_page_range_single(vma, address, size, NULL);
1789 }
1790 EXPORT_SYMBOL_GPL(zap_vma_ptes);
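
/*
 * Usage sketch (hypothetical driver, not from this file): a driver that
 * populated a VM_PFNMAP vma can revoke the mappings without tearing down
 * the vma itself; "mydev" and its fields are illustrative only.
 *
 *	static void mydev_revoke(struct mydev *dev)
 *	{
 *		struct vm_area_struct *vma = dev->vma;
 *
 *		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *	}
 */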
1791 
1792 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1793 {
1794 	pgd_t *pgd;
1795 	p4d_t *p4d;
1796 	pud_t *pud;
1797 	pmd_t *pmd;
1798 
1799 	pgd = pgd_offset(mm, addr);
1800 	p4d = p4d_alloc(mm, pgd, addr);
1801 	if (!p4d)
1802 		return NULL;
1803 	pud = pud_alloc(mm, p4d, addr);
1804 	if (!pud)
1805 		return NULL;
1806 	pmd = pmd_alloc(mm, pud, addr);
1807 	if (!pmd)
1808 		return NULL;
1809 
1810 	VM_BUG_ON(pmd_trans_huge(*pmd));
1811 	return pmd;
1812 }
1813 
1814 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1815 			spinlock_t **ptl)
1816 {
1817 	pmd_t *pmd = walk_to_pmd(mm, addr);
1818 
1819 	if (!pmd)
1820 		return NULL;
1821 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1822 }
1823 
1824 static int validate_page_before_insert(struct page *page)
1825 {
1826 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1827 		return -EINVAL;
1828 	flush_dcache_page(page);
1829 	return 0;
1830 }
1831 
1832 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
1833 			unsigned long addr, struct page *page, pgprot_t prot)
1834 {
1835 	if (!pte_none(ptep_get(pte)))
1836 		return -EBUSY;
1837 	/* Ok, finally just insert the thing.. */
1838 	get_page(page);
1839 	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
1840 	page_add_file_rmap(page, vma, false);
1841 	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
1842 	return 0;
1843 }
1844 
1845 /*
1846  * This is the old fallback for page remapping.
1847  *
1848  * For historical reasons, it only allows reserved pages. Only
1849  * old drivers should use this, and they needed to mark their
1850  * pages reserved for the old functions anyway.
1851  */
1852 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1853 			struct page *page, pgprot_t prot)
1854 {
1855 	int retval;
1856 	pte_t *pte;
1857 	spinlock_t *ptl;
1858 
1859 	retval = validate_page_before_insert(page);
1860 	if (retval)
1861 		goto out;
1862 	retval = -ENOMEM;
1863 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
1864 	if (!pte)
1865 		goto out;
1866 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
1867 	pte_unmap_unlock(pte, ptl);
1868 out:
1869 	return retval;
1870 }
1871 
1872 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
1873 			unsigned long addr, struct page *page, pgprot_t prot)
1874 {
1875 	int err;
1876 
1877 	if (!page_count(page))
1878 		return -EINVAL;
1879 	err = validate_page_before_insert(page);
1880 	if (err)
1881 		return err;
1882 	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
1883 }
1884 
1885 /* insert_pages() amortizes the cost of spinlock operations
1886  * when inserting pages in a loop.
1887  */
1888 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1889 			struct page **pages, unsigned long *num, pgprot_t prot)
1890 {
1891 	pmd_t *pmd = NULL;
1892 	pte_t *start_pte, *pte;
1893 	spinlock_t *pte_lock;
1894 	struct mm_struct *const mm = vma->vm_mm;
1895 	unsigned long curr_page_idx = 0;
1896 	unsigned long remaining_pages_total = *num;
1897 	unsigned long pages_to_write_in_pmd;
1898 	int ret;
1899 more:
1900 	ret = -EFAULT;
1901 	pmd = walk_to_pmd(mm, addr);
1902 	if (!pmd)
1903 		goto out;
1904 
1905 	pages_to_write_in_pmd = min_t(unsigned long,
1906 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1907 
1908 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1909 	ret = -ENOMEM;
1910 	if (pte_alloc(mm, pmd))
1911 		goto out;
1912 
1913 	while (pages_to_write_in_pmd) {
1914 		int pte_idx = 0;
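		/* Limit each locked batch to 8 ptes to keep pte-lock hold times short. */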
1915 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1916 
1917 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1918 		if (!start_pte) {
1919 			ret = -EFAULT;
1920 			goto out;
1921 		}
1922 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1923 			int err = insert_page_in_batch_locked(vma, pte,
1924 				addr, pages[curr_page_idx], prot);
1925 			if (unlikely(err)) {
1926 				pte_unmap_unlock(start_pte, pte_lock);
1927 				ret = err;
1928 				remaining_pages_total -= pte_idx;
1929 				goto out;
1930 			}
1931 			addr += PAGE_SIZE;
1932 			++curr_page_idx;
1933 		}
1934 		pte_unmap_unlock(start_pte, pte_lock);
1935 		pages_to_write_in_pmd -= batch_size;
1936 		remaining_pages_total -= batch_size;
1937 	}
1938 	if (remaining_pages_total)
1939 		goto more;
1940 	ret = 0;
1941 out:
1942 	*num = remaining_pages_total;
1943 	return ret;
1944 }
1945 
1946 /**
1947  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1948  * @vma: user vma to map to
1949  * @addr: target start user address of these pages
1950  * @pages: source kernel pages
1951  * @num: in: number of pages to map. out: number of pages that were *not*
1952  * mapped. (0 means all pages were successfully mapped).
1953  *
1954  * Preferred over vm_insert_page() when inserting multiple pages.
1955  *
1956  * In case of error, we may have mapped a subset of the provided
1957  * pages. It is the caller's responsibility to account for this case.
1958  *
1959  * The same restrictions apply as in vm_insert_page().
1960  */
1961 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1962 			struct page **pages, unsigned long *num)
1963 {
1964 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1965 
1966 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1967 		return -EFAULT;
1968 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1969 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1970 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1971 		vm_flags_set(vma, VM_MIXEDMAP);
1972 	}
1973 	/* Defer page refcount checking till we're about to map that page. */
1974 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1975 }
1976 EXPORT_SYMBOL(vm_insert_pages);
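
/*
 * Usage sketch (hypothetical caller): note that @num is in/out, so a
 * caller can tell how far a partially failed insertion got.
 *
 *	static int mydrv_map_ring(struct vm_area_struct *vma,
 *				  struct page **pages, unsigned long nr)
 *	{
 *		unsigned long left = nr;
 *		int err = vm_insert_pages(vma, vma->vm_start, pages, &left);
 *
 *		if (err)
 *			pr_debug("%lu of %lu pages not mapped: %d\n",
 *				 left, nr, err);
 *		return err;
 *	}
 */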
1977 
1978 /**
1979  * vm_insert_page - insert single page into user vma
1980  * @vma: user vma to map to
1981  * @addr: target user address of this page
1982  * @page: source kernel page
1983  *
1984  * This allows drivers to insert individual pages they've allocated
1985  * into a user vma.
1986  *
1987  * The page has to be a nice clean _individual_ kernel allocation.
1988  * If you allocate a compound page, you need to have marked it as
1989  * such (__GFP_COMP), or manually just split the page up yourself
1990  * (see split_page()).
1991  *
1992  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1993  * took an arbitrary page protection parameter. This doesn't allow
1994  * that. Your vma protection will have to be set up correctly, which
1995  * means that if you want a shared writable mapping, you'd better
1996  * ask for a shared writable mapping!
1997  *
1998  * The page does not need to be reserved.
1999  *
2000  * Usually this function is called from f_op->mmap() handler
2001  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2002  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2003  * function from other places, for example from page-fault handler.
2004  *
2005  * Return: %0 on success, negative error code otherwise.
2006  */
2007 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2008 			struct page *page)
2009 {
2010 	if (addr < vma->vm_start || addr >= vma->vm_end)
2011 		return -EFAULT;
2012 	if (!page_count(page))
2013 		return -EINVAL;
2014 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2015 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2016 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2017 		vm_flags_set(vma, VM_MIXEDMAP);
2018 	}
2019 	return insert_page(vma, addr, page, vma->vm_page_prot);
2020 }
2021 EXPORT_SYMBOL(vm_insert_page);
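
/*
 * Usage sketch (hypothetical driver): called from an f_op->mmap handler,
 * where the mmap_lock is held for writing and vm_flags may be updated.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return vm_insert_page(vma, vma->vm_start, drv->page);
 *	}
 */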
2022 
2023 /*
2024  * __vm_map_pages - maps range of kernel pages into user vma
2025  * @vma: user vma to map to
2026  * @pages: pointer to array of source kernel pages
2027  * @num: number of pages in page array
2028  * @offset: user's requested vm_pgoff
2029  *
2030  * This allows drivers to map range of kernel pages into a user vma.
2031  *
2032  * Return: 0 on success and error code otherwise.
2033  */
2034 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2035 				unsigned long num, unsigned long offset)
2036 {
2037 	unsigned long count = vma_pages(vma);
2038 	unsigned long uaddr = vma->vm_start;
2039 	int ret, i;
2040 
2041 	/* Fail if the user requested offset is beyond the end of the object */
2042 	if (offset >= num)
2043 		return -ENXIO;
2044 
2045 	/* Fail if the user requested size exceeds available object size */
2046 	if (count > num - offset)
2047 		return -ENXIO;
2048 
2049 	for (i = 0; i < count; i++) {
2050 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2051 		if (ret < 0)
2052 			return ret;
2053 		uaddr += PAGE_SIZE;
2054 	}
2055 
2056 	return 0;
2057 }
2058 
2059 /**
2060  * vm_map_pages - map a range of kernel pages starting at the user's requested offset
2061  * @vma: user vma to map to
2062  * @pages: pointer to array of source kernel pages
2063  * @num: number of pages in page array
2064  *
2065  * Maps an object consisting of @num pages, catering for the user's
2066  * requested vm_pgoff
2067  *
2068  * If we fail to insert any page into the vma, the function will return
2069  * immediately leaving any previously inserted pages present.  Callers
2070  * from the mmap handler may immediately return the error as their caller
2071  * will destroy the vma, removing any successfully inserted pages. Other
2072  * callers should make their own arrangements for calling unmap_region().
2073  *
2074  * Context: Process context. Called by mmap handlers.
2075  * Return: 0 on success and error code otherwise.
2076  */
2077 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2078 				unsigned long num)
2079 {
2080 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2081 }
2082 EXPORT_SYMBOL(vm_map_pages);
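
/*
 * Usage sketch (hypothetical buffer object): the offset and size checks
 * against the vma are done by __vm_map_pages(), so the handler stays small.
 *
 *	static int mybuf_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mybuf *buf = file->private_data;
 *
 *		return vm_map_pages(vma, buf->pages, buf->nr_pages);
 *	}
 */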
2083 
2084 /**
2085  * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2086  * @vma: user vma to map to
2087  * @pages: pointer to array of source kernel pages
2088  * @num: number of pages in page array
2089  *
2090  * Similar to vm_map_pages(), except that it explicitly sets the offset
2091  * to 0. This function is intended for drivers that do not consider
2092  * vm_pgoff.
2093  *
2094  * Context: Process context. Called by mmap handlers.
2095  * Return: 0 on success and error code otherwise.
2096  */
2097 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2098 				unsigned long num)
2099 {
2100 	return __vm_map_pages(vma, pages, num, 0);
2101 }
2102 EXPORT_SYMBOL(vm_map_pages_zero);
2103 
2104 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2105 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2106 {
2107 	struct mm_struct *mm = vma->vm_mm;
2108 	pte_t *pte, entry;
2109 	spinlock_t *ptl;
2110 
2111 	pte = get_locked_pte(mm, addr, &ptl);
2112 	if (!pte)
2113 		return VM_FAULT_OOM;
2114 	entry = ptep_get(pte);
2115 	if (!pte_none(entry)) {
2116 		if (mkwrite) {
2117 			/*
2118 			 * For read faults on private mappings the PFN passed
2119 			 * in may not match the PFN we have mapped if the
2120 			 * mapped PFN is a writeable COW page.  In the mkwrite
2121 			 * case we are creating a writable PTE for a shared
2122 			 * mapping and we expect the PFNs to match. If they
2123 			 * don't match, we are likely racing with block
2124 			 * allocation and mapping invalidation so just skip the
2125 			 * update.
2126 			 */
2127 			if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2128 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2129 				goto out_unlock;
2130 			}
2131 			entry = pte_mkyoung(entry);
2132 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2133 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2134 				update_mmu_cache(vma, addr, pte);
2135 		}
2136 		goto out_unlock;
2137 	}
2138 
2139 	/* Ok, finally just insert the thing.. */
2140 	if (pfn_t_devmap(pfn))
2141 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2142 	else
2143 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2144 
2145 	if (mkwrite) {
2146 		entry = pte_mkyoung(entry);
2147 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2148 	}
2149 
2150 	set_pte_at(mm, addr, pte, entry);
2151 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2152 
2153 out_unlock:
2154 	pte_unmap_unlock(pte, ptl);
2155 	return VM_FAULT_NOPAGE;
2156 }
2157 
2158 /**
2159  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2160  * @vma: user vma to map to
2161  * @addr: target user address of this page
2162  * @pfn: source kernel pfn
2163  * @pgprot: pgprot flags for the inserted page
2164  *
2165  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2166  * to override pgprot on a per-page basis.
2167  *
2168  * This only makes sense for IO mappings, and it makes no sense for
2169  * COW mappings.  In general, using multiple vmas is preferable;
2170  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2171  * impractical.
2172  *
2173  * pgprot typically only differs from @vma->vm_page_prot when drivers set
2174  * caching- and encryption bits different than those of @vma->vm_page_prot,
2175  * because the caching- or encryption mode may not be known at mmap() time.
2176  *
2177  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2178  * to set caching and encryption bits for those vmas (except for COW pages).
2179  * This is ensured by core vm only modifying these page table entries using
2180  * functions that don't touch caching- or encryption bits, using pte_modify()
2181  * if needed. (See for example mprotect()).
2182  *
2183  * Also when new page-table entries are created, this is only done using the
2184  * fault() callback, and never using the value of vma->vm_page_prot,
2185  * except for page-table entries that point to anonymous pages as the result
2186  * of COW.
2187  *
2188  * Context: Process context.  May allocate using %GFP_KERNEL.
2189  * Return: vm_fault_t value.
2190  */
2191 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2192 			unsigned long pfn, pgprot_t pgprot)
2193 {
2194 	/*
2195 	 * Technically, architectures with pte_special can avoid all these
2196 	 * restrictions (same for remap_pfn_range).  However we would like
2197 	 * consistency in testing and feature parity among all, so we should
2198 	 * try to keep these invariants in place for everybody.
2199 	 */
2200 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2201 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2202 						(VM_PFNMAP|VM_MIXEDMAP));
2203 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2204 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2205 
2206 	if (addr < vma->vm_start || addr >= vma->vm_end)
2207 		return VM_FAULT_SIGBUS;
2208 
2209 	if (!pfn_modify_allowed(pfn, pgprot))
2210 		return VM_FAULT_SIGBUS;
2211 
2212 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2213 
2214 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2215 			false);
2216 }
2217 EXPORT_SYMBOL(vmf_insert_pfn_prot);
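
/*
 * Usage sketch (hypothetical): override caching from a fault handler when
 * the mode was not known at mmap() time; "mydev" and its fields are
 * illustrative only.
 *
 *	static vm_fault_t mydev_fault(struct vm_fault *vmf)
 *	{
 *		struct mydev *dev = vmf->vma->vm_private_data;
 *		unsigned long pfn = dev->base_pfn + vmf->pgoff;
 *
 *		return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
 *				pgprot_writecombine(vmf->vma->vm_page_prot));
 *	}
 */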
2218 
2219 /**
2220  * vmf_insert_pfn - insert single pfn into user vma
2221  * @vma: user vma to map to
2222  * @addr: target user address of this page
2223  * @pfn: source kernel pfn
2224  *
2225  * Similar to vm_insert_page, this allows drivers to insert individual pages
2226  * they've allocated into a user vma. Same comments apply.
2227  *
2228  * This function should only be called from a vm_ops->fault handler, and
2229  * in that case the handler should return the result of this function.
2230  *
2231  * vma cannot be a COW mapping.
2232  *
2233  * As this is called only for pages that do not currently exist, we
2234  * do not need to flush old virtual caches or the TLB.
2235  *
2236  * Context: Process context.  May allocate using %GFP_KERNEL.
2237  * Return: vm_fault_t value.
2238  */
2239 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2240 			unsigned long pfn)
2241 {
2242 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2243 }
2244 EXPORT_SYMBOL(vmf_insert_pfn);
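
/*
 * Typical shape of a caller (hypothetical; mydev_pfn() is an illustrative
 * helper that computes the backing pfn for a faulting offset):
 *
 *	static vm_fault_t mydev_fault(struct vm_fault *vmf)
 *	{
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      mydev_pfn(vmf->vma, vmf->pgoff));
 *	}
 */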
2245 
2246 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2247 {
2248 	/* these checks mirror the abort conditions in vm_normal_page */
2249 	if (vma->vm_flags & VM_MIXEDMAP)
2250 		return true;
2251 	if (pfn_t_devmap(pfn))
2252 		return true;
2253 	if (pfn_t_special(pfn))
2254 		return true;
2255 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2256 		return true;
2257 	return false;
2258 }
2259 
2260 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2261 		unsigned long addr, pfn_t pfn, bool mkwrite)
2262 {
2263 	pgprot_t pgprot = vma->vm_page_prot;
2264 	int err;
2265 
2266 	BUG_ON(!vm_mixed_ok(vma, pfn));
2267 
2268 	if (addr < vma->vm_start || addr >= vma->vm_end)
2269 		return VM_FAULT_SIGBUS;
2270 
2271 	track_pfn_insert(vma, &pgprot, pfn);
2272 
2273 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2274 		return VM_FAULT_SIGBUS;
2275 
2276 	/*
2277 	 * If we don't have pte special, then we have to use the pfn_valid()
2278 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2279 	 * refcount the page if pfn_valid is true (hence insert_page rather
2280 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2281  * without pte special, it would then be refcounted as a normal page.
2282 	 */
2283 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2284 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2285 		struct page *page;
2286 
2287 		/*
2288 		 * At this point we are committed to insert_page()
2289 		 * regardless of whether the caller specified flags that
2290 		 * result in pfn_t_has_page() == false.
2291 		 */
2292 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2293 		err = insert_page(vma, addr, page, pgprot);
2294 	} else {
2295 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2296 	}
2297 
2298 	if (err == -ENOMEM)
2299 		return VM_FAULT_OOM;
2300 	if (err < 0 && err != -EBUSY)
2301 		return VM_FAULT_SIGBUS;
2302 
2303 	return VM_FAULT_NOPAGE;
2304 }
2305 
2306 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2307 		pfn_t pfn)
2308 {
2309 	return __vm_insert_mixed(vma, addr, pfn, false);
2310 }
2311 EXPORT_SYMBOL(vmf_insert_mixed);
2312 
2313 /*
2314  *  If the insertion of PTE failed because someone else already added a
2315  *  different entry in the meantime, we treat that as success, since we
2316  *  assume the same entry was actually inserted.
2317  */
2318 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2319 		unsigned long addr, pfn_t pfn)
2320 {
2321 	return __vm_insert_mixed(vma, addr, pfn, true);
2322 }
2323 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2324 
2325 /*
2326  * Maps a range of physical memory into the requested pages. The old
2327  * mappings are removed. Any references to nonexistent pages result
2328  * in null mappings (currently treated as "copy-on-access").
2329  */
2330 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2331 			unsigned long addr, unsigned long end,
2332 			unsigned long pfn, pgprot_t prot)
2333 {
2334 	pte_t *pte, *mapped_pte;
2335 	spinlock_t *ptl;
2336 	int err = 0;
2337 
2338 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2339 	if (!pte)
2340 		return -ENOMEM;
2341 	arch_enter_lazy_mmu_mode();
2342 	do {
2343 		BUG_ON(!pte_none(ptep_get(pte)));
2344 		if (!pfn_modify_allowed(pfn, prot)) {
2345 			err = -EACCES;
2346 			break;
2347 		}
2348 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2349 		pfn++;
2350 	} while (pte++, addr += PAGE_SIZE, addr != end);
2351 	arch_leave_lazy_mmu_mode();
2352 	pte_unmap_unlock(mapped_pte, ptl);
2353 	return err;
2354 }
2355 
2356 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2357 			unsigned long addr, unsigned long end,
2358 			unsigned long pfn, pgprot_t prot)
2359 {
2360 	pmd_t *pmd;
2361 	unsigned long next;
2362 	int err;
2363 
2364 	pfn -= addr >> PAGE_SHIFT;
2365 	pmd = pmd_alloc(mm, pud, addr);
2366 	if (!pmd)
2367 		return -ENOMEM;
2368 	VM_BUG_ON(pmd_trans_huge(*pmd));
2369 	do {
2370 		next = pmd_addr_end(addr, end);
2371 		err = remap_pte_range(mm, pmd, addr, next,
2372 				pfn + (addr >> PAGE_SHIFT), prot);
2373 		if (err)
2374 			return err;
2375 	} while (pmd++, addr = next, addr != end);
2376 	return 0;
2377 }
2378 
2379 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2380 			unsigned long addr, unsigned long end,
2381 			unsigned long pfn, pgprot_t prot)
2382 {
2383 	pud_t *pud;
2384 	unsigned long next;
2385 	int err;
2386 
2387 	pfn -= addr >> PAGE_SHIFT;
2388 	pud = pud_alloc(mm, p4d, addr);
2389 	if (!pud)
2390 		return -ENOMEM;
2391 	do {
2392 		next = pud_addr_end(addr, end);
2393 		err = remap_pmd_range(mm, pud, addr, next,
2394 				pfn + (addr >> PAGE_SHIFT), prot);
2395 		if (err)
2396 			return err;
2397 	} while (pud++, addr = next, addr != end);
2398 	return 0;
2399 }
2400 
2401 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2402 			unsigned long addr, unsigned long end,
2403 			unsigned long pfn, pgprot_t prot)
2404 {
2405 	p4d_t *p4d;
2406 	unsigned long next;
2407 	int err;
2408 
2409 	pfn -= addr >> PAGE_SHIFT;
2410 	p4d = p4d_alloc(mm, pgd, addr);
2411 	if (!p4d)
2412 		return -ENOMEM;
2413 	do {
2414 		next = p4d_addr_end(addr, end);
2415 		err = remap_pud_range(mm, p4d, addr, next,
2416 				pfn + (addr >> PAGE_SHIFT), prot);
2417 		if (err)
2418 			return err;
2419 	} while (p4d++, addr = next, addr != end);
2420 	return 0;
2421 }
2422 
2423 /*
2424  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2425  * must have pre-validated the caching bits of the pgprot_t.
2426  */
2427 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2428 		unsigned long pfn, unsigned long size, pgprot_t prot)
2429 {
2430 	pgd_t *pgd;
2431 	unsigned long next;
2432 	unsigned long end = addr + PAGE_ALIGN(size);
2433 	struct mm_struct *mm = vma->vm_mm;
2434 	int err;
2435 
2436 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2437 		return -EINVAL;
2438 
2439 	/*
2440 	 * Physically remapped pages are special. Tell the
2441 	 * rest of the world about it:
2442 	 *   VM_IO tells people not to look at these pages
2443 	 *	(accesses can have side effects).
2444 	 *   VM_PFNMAP tells the core MM that the base pages are just
2445 	 *	raw PFN mappings, and do not have a "struct page" associated
2446 	 *	with them.
2447 	 *   VM_DONTEXPAND
2448 	 *      Disable vma merging and expanding with mremap().
2449 	 *   VM_DONTDUMP
2450 	 *      Omit vma from core dump, even when VM_IO turned off.
2451 	 *
2452 	 * There's a horrible special case to handle copy-on-write
2453 	 * behaviour that some programs depend on. We mark the "original"
2454 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2455 	 * See vm_normal_page() for details.
2456 	 */
2457 	if (is_cow_mapping(vma->vm_flags)) {
2458 		if (addr != vma->vm_start || end != vma->vm_end)
2459 			return -EINVAL;
2460 		vma->vm_pgoff = pfn;
2461 	}
2462 
2463 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2464 
2465 	BUG_ON(addr >= end);
2466 	pfn -= addr >> PAGE_SHIFT;
2467 	pgd = pgd_offset(mm, addr);
2468 	flush_cache_range(vma, addr, end);
2469 	do {
2470 		next = pgd_addr_end(addr, end);
2471 		err = remap_p4d_range(mm, pgd, addr, next,
2472 				pfn + (addr >> PAGE_SHIFT), prot);
2473 		if (err)
2474 			return err;
2475 	} while (pgd++, addr = next, addr != end);
2476 
2477 	return 0;
2478 }
2479 
2480 /**
2481  * remap_pfn_range - remap kernel memory to userspace
2482  * @vma: user vma to map to
2483  * @addr: target page aligned user address to start at
2484  * @pfn: page frame number of kernel physical memory address
2485  * @size: size of mapping area
2486  * @prot: page protection flags for this mapping
2487  *
2488  * Note: this is only safe if the mm semaphore is held when called.
2489  *
2490  * Return: %0 on success, negative error code otherwise.
2491  */
2492 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2493 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2494 {
2495 	int err;
2496 
2497 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2498 	if (err)
2499 		return -EINVAL;
2500 
2501 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2502 	if (err)
2503 		untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2504 	return err;
2505 }
2506 EXPORT_SYMBOL(remap_pfn_range);
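
/*
 * Usage sketch (hypothetical device with registers at dev->phys_base):
 * map the whole register window at mmap() time, in one go.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev->phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */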
2507 
2508 /**
2509  * vm_iomap_memory - remap memory to userspace
2510  * @vma: user vma to map to
2511  * @start: start of the physical memory to be mapped
2512  * @len: size of area
2513  *
2514  * This is a simplified io_remap_pfn_range() for common driver use. The
2515  * driver just needs to give us the physical memory range to be mapped,
2516  * we'll figure out the rest from the vma information.
2517  *
2518  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2519  * write-combining or similar behaviour.
2520  *
2521  * Return: %0 on success, negative error code otherwise.
2522  */
2523 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2524 {
2525 	unsigned long vm_len, pfn, pages;
2526 
2527 	/* Check that the physical memory area passed in looks valid */
2528 	if (start + len < start)
2529 		return -EINVAL;
2530 	/*
2531 	 * You *really* shouldn't map things that aren't page-aligned,
2532 	 * but we've historically allowed it because IO memory might
2533 	 * just have smaller alignment.
2534 	 */
2535 	len += start & ~PAGE_MASK;
2536 	pfn = start >> PAGE_SHIFT;
2537 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2538 	if (pfn + pages < pfn)
2539 		return -EINVAL;
2540 
2541 	/* We start the mapping 'vm_pgoff' pages into the area */
2542 	if (vma->vm_pgoff > pages)
2543 		return -EINVAL;
2544 	pfn += vma->vm_pgoff;
2545 	pages -= vma->vm_pgoff;
2546 
2547 	/* Can we fit all of the mapping? */
2548 	vm_len = vma->vm_end - vma->vm_start;
2549 	if (vm_len >> PAGE_SHIFT > pages)
2550 		return -EINVAL;
2551 
2552 	/* Ok, let it rip */
2553 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2554 }
2555 EXPORT_SYMBOL(vm_iomap_memory);
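
/*
 * Usage sketch (hypothetical): compare with the remap_pfn_range() example
 * above; here the driver passes the raw physical range and lets the core
 * derive the pfn, length and offset from the vma.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
 *	}
 */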
2556 
2557 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2558 				     unsigned long addr, unsigned long end,
2559 				     pte_fn_t fn, void *data, bool create,
2560 				     pgtbl_mod_mask *mask)
2561 {
2562 	pte_t *pte, *mapped_pte;
2563 	int err = 0;
2564 	spinlock_t *ptl;
2565 
2566 	if (create) {
2567 		mapped_pte = pte = (mm == &init_mm) ?
2568 			pte_alloc_kernel_track(pmd, addr, mask) :
2569 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2570 		if (!pte)
2571 			return -ENOMEM;
2572 	} else {
2573 		mapped_pte = pte = (mm == &init_mm) ?
2574 			pte_offset_kernel(pmd, addr) :
2575 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2576 		if (!pte)
2577 			return -EINVAL;
2578 	}
2579 
2580 	arch_enter_lazy_mmu_mode();
2581 
2582 	if (fn) {
2583 		do {
2584 			if (create || !pte_none(ptep_get(pte))) {
2585 				err = fn(pte++, addr, data);
2586 				if (err)
2587 					break;
2588 			}
2589 		} while (addr += PAGE_SIZE, addr != end);
2590 	}
2591 	*mask |= PGTBL_PTE_MODIFIED;
2592 
2593 	arch_leave_lazy_mmu_mode();
2594 
2595 	if (mm != &init_mm)
2596 		pte_unmap_unlock(mapped_pte, ptl);
2597 	return err;
2598 }
2599 
2600 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2601 				     unsigned long addr, unsigned long end,
2602 				     pte_fn_t fn, void *data, bool create,
2603 				     pgtbl_mod_mask *mask)
2604 {
2605 	pmd_t *pmd;
2606 	unsigned long next;
2607 	int err = 0;
2608 
2609 	BUG_ON(pud_huge(*pud));
2610 
2611 	if (create) {
2612 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2613 		if (!pmd)
2614 			return -ENOMEM;
2615 	} else {
2616 		pmd = pmd_offset(pud, addr);
2617 	}
2618 	do {
2619 		next = pmd_addr_end(addr, end);
2620 		if (pmd_none(*pmd) && !create)
2621 			continue;
2622 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2623 			return -EINVAL;
2624 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2625 			if (!create)
2626 				continue;
2627 			pmd_clear_bad(pmd);
2628 		}
2629 		err = apply_to_pte_range(mm, pmd, addr, next,
2630 					 fn, data, create, mask);
2631 		if (err)
2632 			break;
2633 	} while (pmd++, addr = next, addr != end);
2634 
2635 	return err;
2636 }
2637 
2638 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2639 				     unsigned long addr, unsigned long end,
2640 				     pte_fn_t fn, void *data, bool create,
2641 				     pgtbl_mod_mask *mask)
2642 {
2643 	pud_t *pud;
2644 	unsigned long next;
2645 	int err = 0;
2646 
2647 	if (create) {
2648 		pud = pud_alloc_track(mm, p4d, addr, mask);
2649 		if (!pud)
2650 			return -ENOMEM;
2651 	} else {
2652 		pud = pud_offset(p4d, addr);
2653 	}
2654 	do {
2655 		next = pud_addr_end(addr, end);
2656 		if (pud_none(*pud) && !create)
2657 			continue;
2658 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2659 			return -EINVAL;
2660 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2661 			if (!create)
2662 				continue;
2663 			pud_clear_bad(pud);
2664 		}
2665 		err = apply_to_pmd_range(mm, pud, addr, next,
2666 					 fn, data, create, mask);
2667 		if (err)
2668 			break;
2669 	} while (pud++, addr = next, addr != end);
2670 
2671 	return err;
2672 }
2673 
2674 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2675 				     unsigned long addr, unsigned long end,
2676 				     pte_fn_t fn, void *data, bool create,
2677 				     pgtbl_mod_mask *mask)
2678 {
2679 	p4d_t *p4d;
2680 	unsigned long next;
2681 	int err = 0;
2682 
2683 	if (create) {
2684 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2685 		if (!p4d)
2686 			return -ENOMEM;
2687 	} else {
2688 		p4d = p4d_offset(pgd, addr);
2689 	}
2690 	do {
2691 		next = p4d_addr_end(addr, end);
2692 		if (p4d_none(*p4d) && !create)
2693 			continue;
2694 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2695 			return -EINVAL;
2696 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2697 			if (!create)
2698 				continue;
2699 			p4d_clear_bad(p4d);
2700 		}
2701 		err = apply_to_pud_range(mm, p4d, addr, next,
2702 					 fn, data, create, mask);
2703 		if (err)
2704 			break;
2705 	} while (p4d++, addr = next, addr != end);
2706 
2707 	return err;
2708 }
2709 
2710 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2711 				 unsigned long size, pte_fn_t fn,
2712 				 void *data, bool create)
2713 {
2714 	pgd_t *pgd;
2715 	unsigned long start = addr, next;
2716 	unsigned long end = addr + size;
2717 	pgtbl_mod_mask mask = 0;
2718 	int err = 0;
2719 
2720 	if (WARN_ON(addr >= end))
2721 		return -EINVAL;
2722 
2723 	pgd = pgd_offset(mm, addr);
2724 	do {
2725 		next = pgd_addr_end(addr, end);
2726 		if (pgd_none(*pgd) && !create)
2727 			continue;
2728 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2729 			return -EINVAL;
2730 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2731 			if (!create)
2732 				continue;
2733 			pgd_clear_bad(pgd);
2734 		}
2735 		err = apply_to_p4d_range(mm, pgd, addr, next,
2736 					 fn, data, create, &mask);
2737 		if (err)
2738 			break;
2739 	} while (pgd++, addr = next, addr != end);
2740 
2741 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2742 		arch_sync_kernel_mappings(start, start + size);
2743 
2744 	return err;
2745 }
2746 
2747 /*
2748  * Scan a region of virtual memory, filling in page tables as necessary
2749  * and calling a provided function on each leaf page table.
2750  */
2751 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2752 			unsigned long size, pte_fn_t fn, void *data)
2753 {
2754 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2755 }
2756 EXPORT_SYMBOL_GPL(apply_to_page_range);
2757 
2758 /*
2759  * Scan a region of virtual memory, calling a provided function on
2760  * each leaf page table where it exists.
2761  *
2762  * Unlike apply_to_page_range, this does _not_ fill in page tables
2763  * where they are absent.
2764  */
2765 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2766 				 unsigned long size, pte_fn_t fn, void *data)
2767 {
2768 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2769 }
2770 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
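
/*
 * Usage sketch for both variants above: the pte_fn_t callback runs once
 * per pte with the page table lock held.  A hypothetical callback that
 * counts present kernel ptes in a range (assumes init_mm):
 *
 *	static int count_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(ptep_get(pte)))
 *			(*count)++;
 *		return 0;
 *	}
 *
 * and then: apply_to_existing_page_range(&init_mm, start, size,
 *					  count_pte, &count);
 */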
2771 
2772 /*
2773  * handle_pte_fault chooses page fault handler according to an entry which was
2774  * read non-atomically.  Before making any commitment, on those architectures
2775  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2776  * parts, do_swap_page must check under lock before unmapping the pte and
2777  * proceeding (but do_wp_page is only called after already making such a check;
2778  * and do_anonymous_page can safely check later on).
2779  */
2780 static inline int pte_unmap_same(struct vm_fault *vmf)
2781 {
2782 	int same = 1;
2783 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2784 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2785 		spin_lock(vmf->ptl);
2786 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
2787 		spin_unlock(vmf->ptl);
2788 	}
2789 #endif
2790 	pte_unmap(vmf->pte);
2791 	vmf->pte = NULL;
2792 	return same;
2793 }
2794 
2795 /*
2796  * Return:
2797  *	0:		copy succeeded
2798  *	-EHWPOISON:	copy failed due to hwpoison in source page
2799  *	-EAGAIN:	copy failed (some other reason)
2800  */
2801 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
2802 				      struct vm_fault *vmf)
2803 {
2804 	int ret;
2805 	void *kaddr;
2806 	void __user *uaddr;
2807 	struct vm_area_struct *vma = vmf->vma;
2808 	struct mm_struct *mm = vma->vm_mm;
2809 	unsigned long addr = vmf->address;
2810 
2811 	if (likely(src)) {
2812 		if (copy_mc_user_highpage(dst, src, addr, vma)) {
2813 			memory_failure_queue(page_to_pfn(src), 0);
2814 			return -EHWPOISON;
2815 		}
2816 		return 0;
2817 	}
2818 
2819 	/*
2820 	 * If the source page was a PFN mapping, we don't have
2821 	 * a "struct page" for it. We do a best-effort copy by
2822 	 * just copying from the original user address. If that
2823 	 * fails, we just zero-fill it. Live with it.
2824 	 */
2825 	kaddr = kmap_atomic(dst);
2826 	uaddr = (void __user *)(addr & PAGE_MASK);
2827 
2828 	/*
2829 	 * On architectures with software "accessed" bits, we would
2830 	 * take a double page fault, so mark it accessed here.
2831 	 */
2832 	vmf->pte = NULL;
2833 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
2834 		pte_t entry;
2835 
2836 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2837 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
2838 			/*
2839 			 * Another thread has already handled the fault;
2840 			 * just update the local tlb.
2841 			 */
2842 			if (vmf->pte)
2843 				update_mmu_tlb(vma, addr, vmf->pte);
2844 			ret = -EAGAIN;
2845 			goto pte_unlock;
2846 		}
2847 
2848 		entry = pte_mkyoung(vmf->orig_pte);
2849 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2850 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
2851 	}
2852 
2853 	/*
2854 	 * This really shouldn't fail, because the page is there
2855 	 * in the page tables. But it might just be unreadable,
2856 	 * in which case we just give up and fill the result with
2857 	 * zeroes.
2858 	 */
2859 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2860 		if (vmf->pte)
2861 			goto warn;
2862 
2863 		/* Re-validate under PTL if the page is still mapped */
2864 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2865 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
2866 			/* The PTE changed under us, update local tlb */
2867 			if (vmf->pte)
2868 				update_mmu_tlb(vma, addr, vmf->pte);
2869 			ret = -EAGAIN;
2870 			goto pte_unlock;
2871 		}
2872 
2873 		/*
2874 		 * The same page may have been mapped back since the last copy attempt.
2875 		 * Try to copy again under PTL.
2876 		 */
2877 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2878 			/*
2879 			 * Warn in case there is some obscure
2880 			 * use-case.
2881 			 */
2882 warn:
2883 			WARN_ON_ONCE(1);
2884 			clear_page(kaddr);
2885 		}
2886 	}
2887 
2888 	ret = 0;
2889 
2890 pte_unlock:
2891 	if (vmf->pte)
2892 		pte_unmap_unlock(vmf->pte, vmf->ptl);
2893 	kunmap_atomic(kaddr);
2894 	flush_dcache_page(dst);
2895 
2896 	return ret;
2897 }
2898 
2899 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2900 {
2901 	struct file *vm_file = vma->vm_file;
2902 
2903 	if (vm_file)
2904 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2905 
2906 	/*
2907 	 * Special mappings (e.g. VDSO) do not have any file so fake
2908 	 * a default GFP_KERNEL for them.
2909 	 */
2910 	return GFP_KERNEL;
2911 }
2912 
2913 /*
2914  * Notify the address space that the page is about to become writable so that
2915  * it can prohibit this or wait for the page to get into an appropriate state.
2916  *
2917  * We do this without the lock held, so that it can sleep if it needs to.
2918  */
2919 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
2920 {
2921 	vm_fault_t ret;
2922 	unsigned int old_flags = vmf->flags;
2923 
2924 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2925 
2926 	if (vmf->vma->vm_file &&
2927 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2928 		return VM_FAULT_SIGBUS;
2929 
2930 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2931 	/* Restore original flags so that caller is not surprised */
2932 	vmf->flags = old_flags;
2933 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2934 		return ret;
2935 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2936 		folio_lock(folio);
2937 		if (!folio->mapping) {
2938 			folio_unlock(folio);
2939 			return 0; /* retry */
2940 		}
2941 		ret |= VM_FAULT_LOCKED;
2942 	} else
2943 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2944 	return ret;
2945 }
2946 
2947 /*
2948  * Handle dirtying of a page in shared file mapping on a write fault.
2949  *
2950  * The function expects the page to be locked and unlocks it.
2951  */
2952 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2953 {
2954 	struct vm_area_struct *vma = vmf->vma;
2955 	struct address_space *mapping;
2956 	struct folio *folio = page_folio(vmf->page);
2957 	bool dirtied;
2958 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2959 
2960 	dirtied = folio_mark_dirty(folio);
2961 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
2962 	/*
2963 	 * Take a local copy of the address_space - folio.mapping may be zeroed
2964 	 * by truncate after folio_unlock().   The address_space itself remains
2965 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
2966 	 * release semantics to prevent the compiler from undoing this copying.
2967 	 */
2968 	mapping = folio_raw_mapping(folio);
2969 	folio_unlock(folio);
2970 
2971 	if (!page_mkwrite)
2972 		file_update_time(vma->vm_file);
2973 
2974 	/*
2975 	 * Throttle page dirtying rate down to writeback speed.
2976 	 *
2977 	 * mapping may be NULL here because some device drivers do not
2978 	 * set page.mapping but still dirty their pages
2979 	 *
2980 	 * Drop the mmap_lock before waiting on IO, if we can. The file
2981 	 * is pinning the mapping, as per above.
2982 	 */
2983 	if ((dirtied || page_mkwrite) && mapping) {
2984 		struct file *fpin;
2985 
2986 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2987 		balance_dirty_pages_ratelimited(mapping);
2988 		if (fpin) {
2989 			fput(fpin);
2990 			return VM_FAULT_COMPLETED;
2991 		}
2992 	}
2993 
2994 	return 0;
2995 }
2996 
2997 /*
2998  * Handle write page faults for pages that can be reused in the current vma
2999  *
3000  * This can happen either because the mapping has the VM_SHARED flag set,
3001  * or because ours is the last remaining reference to the page. In either
3002  * case, all we need to do here is to mark the page as writable and update
3003  * any related book-keeping.
3004  */
3005 static inline void wp_page_reuse(struct vm_fault *vmf)
3006 	__releases(vmf->ptl)
3007 {
3008 	struct vm_area_struct *vma = vmf->vma;
3009 	struct page *page = vmf->page;
3010 	pte_t entry;
3011 
3012 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3013 	VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
3014 
3015 	/*
3016 	 * Clear the page's cpupid information as the existing
3017 	 * information potentially belongs to a now completely
3018 	 * unrelated process.
3019 	 */
3020 	if (page)
3021 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3022 
3023 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3024 	entry = pte_mkyoung(vmf->orig_pte);
3025 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3026 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3027 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3028 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3029 	count_vm_event(PGREUSE);
3030 }
3031 
3032 /*
3033  * Handle the case of a page which we actually need to copy to a new page,
3034  * either due to COW or unsharing.
3035  *
3036  * Called with mmap_lock locked and the old page referenced, but
3037  * without the ptl held.
3038  *
3039  * High level logic flow:
3040  *
3041  * - Allocate a page, copy the content of the old page to the new one.
3042  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3043  * - Take the PTL. If the pte changed, bail out and release the allocated page
3044  * - If the pte is still the way we remember it, update the page table and all
3045  *   relevant references. This includes dropping the reference the page-table
3046  *   held to the old page, as well as updating the rmap.
3047  * - In any case, unlock the PTL and drop the reference we took to the old page.
3048  */
3049 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3050 {
3051 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3052 	struct vm_area_struct *vma = vmf->vma;
3053 	struct mm_struct *mm = vma->vm_mm;
3054 	struct folio *old_folio = NULL;
3055 	struct folio *new_folio = NULL;
3056 	pte_t entry;
3057 	int page_copied = 0;
3058 	struct mmu_notifier_range range;
3059 	int ret;
3060 
3061 	delayacct_wpcopy_start();
3062 
3063 	if (vmf->page)
3064 		old_folio = page_folio(vmf->page);
3065 	if (unlikely(anon_vma_prepare(vma)))
3066 		goto oom;
3067 
3068 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3069 		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
3070 		if (!new_folio)
3071 			goto oom;
3072 	} else {
3073 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
3074 				vmf->address, false);
3075 		if (!new_folio)
3076 			goto oom;
3077 
3078 		ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3079 		if (ret) {
3080 			/*
3081 			 * COW failed; if the fault was resolved by another
3082 			 * thread, that's fine. If not, userspace will re-fault
3083 			 * on the same address and we will handle the fault
3084 			 * from the second attempt.
3085 			 * The -EHWPOISON case will not be retried.
3086 			 */
3087 			folio_put(new_folio);
3088 			if (old_folio)
3089 				folio_put(old_folio);
3090 
3091 			delayacct_wpcopy_end();
3092 			return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3093 		}
3094 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3095 	}
3096 
3097 	if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
3098 		goto oom_free_new;
3099 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
3100 
3101 	__folio_mark_uptodate(new_folio);
3102 
3103 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3104 				vmf->address & PAGE_MASK,
3105 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3106 	mmu_notifier_invalidate_range_start(&range);
3107 
3108 	/*
3109 	 * Re-check the pte - we dropped the lock
3110 	 */
3111 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3112 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3113 		if (old_folio) {
3114 			if (!folio_test_anon(old_folio)) {
3115 				dec_mm_counter(mm, mm_counter_file(&old_folio->page));
3116 				inc_mm_counter(mm, MM_ANONPAGES);
3117 			}
3118 		} else {
3119 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3120 			inc_mm_counter(mm, MM_ANONPAGES);
3121 		}
3122 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3123 		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3124 		entry = pte_sw_mkyoung(entry);
3125 		if (unlikely(unshare)) {
3126 			if (pte_soft_dirty(vmf->orig_pte))
3127 				entry = pte_mksoft_dirty(entry);
3128 			if (pte_uffd_wp(vmf->orig_pte))
3129 				entry = pte_mkuffd_wp(entry);
3130 		} else {
3131 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3132 		}
3133 
3134 		/*
3135 		 * Clear the pte entry and flush it first, before updating the
3136 		 * pte with the new entry, to keep TLBs on different CPUs in
3137 		 * sync. This code used to set the new PTE then flush TLBs, but
3138 		 * that left a window where the new PTE could be loaded into
3139 		 * some TLBs while the old PTE remains in others.
3140 		 */
3141 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3142 		folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3143 		folio_add_lru_vma(new_folio, vma);
3144 		/*
3145 		 * We call the notify macro here because, when using secondary
3146 		 * mmu page tables (such as kvm shadow page tables), we want the
3147 		 * new page to be mapped directly into the secondary page table.
3148 		 */
3149 		BUG_ON(unshare && pte_write(entry));
3150 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3151 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3152 		if (old_folio) {
3153 			/*
3154 			 * Only after switching the pte to the new page may
3155 			 * we remove the mapcount here. Otherwise another
3156 			 * process may come and find the rmap count decremented
3157 			 * before the pte is switched to the new page, and
3158 			 * "reuse" the old page writing into it while our pte
3159 			 * here still points into it and can be read by other
3160 			 * threads.
3161 			 *
3162 			 * The critical issue is to order this
3163 			 * page_remove_rmap with the ptep_clear_flush above.
3164 			 * Those stores are ordered by (if nothing else,)
3165 			 * the barrier present in the atomic_add_negative
3166 			 * in page_remove_rmap.
3167 			 *
3168 			 * Then the TLB flush in ptep_clear_flush ensures that
3169 			 * no process can access the old page before the
3170 			 * decremented mapcount is visible. And the old page
3171 			 * cannot be reused until after the decremented
3172 			 * mapcount is visible. So transitively, TLBs to
3173 			 * old page will be flushed before it can be reused.
3174 			 */
3175 			page_remove_rmap(vmf->page, vma, false);
3176 		}
3177 
3178 		/* Free the old page.. */
3179 		new_folio = old_folio;
3180 		page_copied = 1;
3181 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3182 	} else if (vmf->pte) {
3183 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3184 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3185 	}
3186 
3187 	mmu_notifier_invalidate_range_end(&range);
3188 
3189 	if (new_folio)
3190 		folio_put(new_folio);
3191 	if (old_folio) {
3192 		if (page_copied)
3193 			free_swap_cache(&old_folio->page);
3194 		folio_put(old_folio);
3195 	}
3196 
3197 	delayacct_wpcopy_end();
3198 	return 0;
3199 oom_free_new:
3200 	folio_put(new_folio);
3201 oom:
3202 	if (old_folio)
3203 		folio_put(old_folio);
3204 
3205 	delayacct_wpcopy_end();
3206 	return VM_FAULT_OOM;
3207 }
3208 
3209 /**
3210  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3211  *			  writeable once the page is prepared
3212  *
3213  * @vmf: structure describing the fault
3214  *
3215  * This function handles all that is needed to finish a write page fault in a
3216  * shared mapping due to PTE being read-only once the mapped page is prepared.
3217  * It handles locking of PTE and modifying it.
3218  *
3219  * The function expects the page to be locked or other protection against
3220  * concurrent faults / writeback (such as DAX radix tree locks).
3221  *
3222  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3223  * we acquired PTE lock.
3224  */
3225 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3226 {
3227 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3228 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3229 				       &vmf->ptl);
3230 	if (!vmf->pte)
3231 		return VM_FAULT_NOPAGE;
3232 	/*
3233 	 * We might have raced with another page fault while we released the
3234 	 * pte_offset_map_lock.
3235 	 */
3236 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3237 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3238 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3239 		return VM_FAULT_NOPAGE;
3240 	}
3241 	wp_page_reuse(vmf);
3242 	return 0;
3243 }
3244 
3245 /*
3246  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3247  * mapping
3248  */
3249 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3250 {
3251 	struct vm_area_struct *vma = vmf->vma;
3252 
3253 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3254 		vm_fault_t ret;
3255 
3256 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3257 		if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3258 			vma_end_read(vmf->vma);
3259 			return VM_FAULT_RETRY;
3260 		}
3261 
3262 		vmf->flags |= FAULT_FLAG_MKWRITE;
3263 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3264 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3265 			return ret;
3266 		return finish_mkwrite_fault(vmf);
3267 	}
3268 	wp_page_reuse(vmf);
3269 	return 0;
3270 }
3271 
3272 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3273 	__releases(vmf->ptl)
3274 {
3275 	struct vm_area_struct *vma = vmf->vma;
3276 	vm_fault_t ret = 0;
3277 
3278 	folio_get(folio);
3279 
3280 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3281 		vm_fault_t tmp;
3282 
3283 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3284 		if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3285 			folio_put(folio);
3286 			vma_end_read(vmf->vma);
3287 			return VM_FAULT_RETRY;
3288 		}
3289 
3290 		tmp = do_page_mkwrite(vmf, folio);
3291 		if (unlikely(!tmp || (tmp &
3292 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3293 			folio_put(folio);
3294 			return tmp;
3295 		}
3296 		tmp = finish_mkwrite_fault(vmf);
3297 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3298 			folio_unlock(folio);
3299 			folio_put(folio);
3300 			return tmp;
3301 		}
3302 	} else {
3303 		wp_page_reuse(vmf);
3304 		folio_lock(folio);
3305 	}
3306 	ret |= fault_dirty_shared_page(vmf);
3307 	folio_put(folio);
3308 
3309 	return ret;
3310 }
3311 
3312 /*
3313  * This routine handles present pages, when
3314  * * users try to write to a shared page (FAULT_FLAG_WRITE)
3315  * * GUP wants to take a R/O pin on a possibly shared anonymous page
3316  *   (FAULT_FLAG_UNSHARE)
3317  *
3318  * It is done by copying the page to a new address and decrementing the
3319  * shared-page counter for the old page.
3320  *
3321  * Note that this routine assumes that the protection checks have been
3322  * done by the caller (the low-level page fault routine in most cases).
3323  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3324  * done any necessary COW.
3325  *
3326  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3327  * though the page will change only once the write actually happens. This
3328  * avoids a few races, and potentially makes it more efficient.
3329  *
3330  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3331  * but allow concurrent faults), with pte both mapped and locked.
3332  * We return with mmap_lock still held, but pte unmapped and unlocked.
3333  */
3334 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3335 	__releases(vmf->ptl)
3336 {
3337 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3338 	struct vm_area_struct *vma = vmf->vma;
3339 	struct folio *folio = NULL;
3340 
3341 	if (likely(!unshare)) {
3342 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3343 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3344 			return handle_userfault(vmf, VM_UFFD_WP);
3345 		}
3346 
3347 		/*
3348 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3349 		 * is flushed in this case before copying.
3350 		 */
3351 		if (unlikely(userfaultfd_wp(vmf->vma) &&
3352 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3353 			flush_tlb_page(vmf->vma, vmf->address);
3354 	}
3355 
3356 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3357 
3358 	if (vmf->page)
3359 		folio = page_folio(vmf->page);
3360 
3361 	/*
3362 	 * Shared mapping: we are guaranteed to have VM_WRITE and
3363 	 * FAULT_FLAG_WRITE set at this point.
3364 	 */
3365 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3366 		/*
3367 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3368 		 * VM_PFNMAP VMA.
3369 		 *
3370 		 * We should not cow pages in a shared writeable mapping.
3371 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3372 		 */
3373 		if (!vmf->page)
3374 			return wp_pfn_shared(vmf);
3375 		return wp_page_shared(vmf, folio);
3376 	}
3377 
3378 	/*
3379 	 * Private mapping: create an exclusive anonymous page copy if reuse
3380 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3381 	 */
3382 	if (folio && folio_test_anon(folio)) {
3383 		/*
3384 		 * If the page is exclusive to this process we must reuse the
3385 		 * page without further checks.
3386 		 */
3387 		if (PageAnonExclusive(vmf->page))
3388 			goto reuse;
3389 
3390 		/*
3391 		 * We have to verify under folio lock: these early checks are
3392 		 * just an optimization to avoid locking the folio and freeing
3393 		 * the swapcache if there is little hope that we can reuse.
3394 		 *
3395 		 * KSM doesn't necessarily raise the folio refcount.
3396 		 */
3397 		if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3398 			goto copy;
3399 		if (!folio_test_lru(folio))
3400 			/*
3401 			 * We cannot easily detect+handle references from
3402 			 * remote LRU caches or references to LRU folios.
3403 			 */
3404 			lru_add_drain();
3405 		if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3406 			goto copy;
3407 		if (!folio_trylock(folio))
3408 			goto copy;
3409 		if (folio_test_swapcache(folio))
3410 			folio_free_swap(folio);
3411 		if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3412 			folio_unlock(folio);
3413 			goto copy;
3414 		}
3415 		/*
3416 		 * Ok, we've got the only folio reference from our mapping
3417 		 * and the folio is locked, it's dark out, and we're wearing
3418 		 * sunglasses. Hit it.
3419 		 */
3420 		page_move_anon_rmap(vmf->page, vma);
3421 		folio_unlock(folio);
3422 reuse:
3423 		if (unlikely(unshare)) {
3424 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3425 			return 0;
3426 		}
3427 		wp_page_reuse(vmf);
3428 		return 0;
3429 	}
3430 copy:
3431 	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) {
3432 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3433 		vma_end_read(vmf->vma);
3434 		return VM_FAULT_RETRY;
3435 	}
3436 
3437 	/*
3438 	 * Ok, we need to copy. Oh, well..
3439 	 */
3440 	if (folio)
3441 		folio_get(folio);
3442 
3443 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3444 #ifdef CONFIG_KSM
3445 	if (folio && folio_test_ksm(folio))
3446 		count_vm_event(COW_KSM);
3447 #endif
3448 	return wp_page_copy(vmf);
3449 }
3450 
3451 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3452 		unsigned long start_addr, unsigned long end_addr,
3453 		struct zap_details *details)
3454 {
3455 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3456 }
3457 
3458 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3459 					    pgoff_t first_index,
3460 					    pgoff_t last_index,
3461 					    struct zap_details *details)
3462 {
3463 	struct vm_area_struct *vma;
3464 	pgoff_t vba, vea, zba, zea;
3465 
3466 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
3467 		vba = vma->vm_pgoff;
3468 		vea = vba + vma_pages(vma) - 1;
3469 		zba = max(first_index, vba);
3470 		zea = min(last_index, vea);
3471 
3472 		unmap_mapping_range_vma(vma,
3473 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3474 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3475 				details);
3476 	}
3477 }
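
/*
 * Worked example of the index-to-address arithmetic above (illustrative,
 * assuming 4KiB pages): for a VMA with vm_pgoff == 10 starting at
 * vm_start == 0x7f0000000000, zapping file indices 12..13 yields
 *
 *	start = ((12 - 10) << PAGE_SHIFT) + vm_start = 0x7f0000002000
 *	end   = ((13 - 10 + 1) << PAGE_SHIFT) + vm_start = 0x7f0000004000
 *
 * i.e. a [start, end) virtual range covering exactly the two zapped pages.
 */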
3478 
3479 /**
3480  * unmap_mapping_folio() - Unmap single folio from processes.
3481  * @folio: The locked folio to be unmapped.
3482  *
3483  * Unmap this folio from any userspace process which still has it mmaped.
3484  * Typically, for efficiency, the range of nearby pages has already been
3485  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3486  * truncation or invalidation holds the lock on a folio, it may find that
3487  * the folio has been remapped again; unmap_mapping_folio() is then used
3488  * to unmap it for good.
3489  */
3490 void unmap_mapping_folio(struct folio *folio)
3491 {
3492 	struct address_space *mapping = folio->mapping;
3493 	struct zap_details details = { };
3494 	pgoff_t	first_index;
3495 	pgoff_t	last_index;
3496 
3497 	VM_BUG_ON(!folio_test_locked(folio));
3498 
3499 	first_index = folio->index;
3500 	last_index = folio_next_index(folio) - 1;
3501 
3502 	details.even_cows = false;
3503 	details.single_folio = folio;
3504 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
3505 
3506 	i_mmap_lock_read(mapping);
3507 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3508 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3509 					 last_index, &details);
3510 	i_mmap_unlock_read(mapping);
3511 }
3512 
3513 /**
3514  * unmap_mapping_pages() - Unmap pages from processes.
3515  * @mapping: The address space containing pages to be unmapped.
3516  * @start: Index of first page to be unmapped.
3517  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3518  * @even_cows: Whether to unmap even private COWed pages.
3519  *
3520  * Unmap the pages in this address space from any userspace process which
3521  * has them mmaped.  Generally, you want to remove COWed pages as well when
3522  * a file is being truncated, but not when invalidating pages from the page
3523  * cache.
3524  */
3525 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3526 		pgoff_t nr, bool even_cows)
3527 {
3528 	struct zap_details details = { };
3529 	pgoff_t	first_index = start;
3530 	pgoff_t	last_index = start + nr - 1;
3531 
3532 	details.even_cows = even_cows;
3533 	if (last_index < first_index)
3534 		last_index = ULONG_MAX;
3535 
3536 	i_mmap_lock_read(mapping);
3537 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3538 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3539 					 last_index, &details);
3540 	i_mmap_unlock_read(mapping);
3541 }
3542 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
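
/*
 * Usage sketch (illustrative, not taken from a real caller): a filesystem
 * invalidating a single clean pagecache page at @index would leave private
 * COWed pages alone:
 *
 *	unmap_mapping_pages(inode->i_mapping, index, 1, false);
 *
 * Passing nr == 0 instead unmaps everything from @index to end of file.
 */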
3543 
3544 /**
3545  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3546  * address_space corresponding to the specified byte range in the underlying
3547  * file.
3548  *
3549  * @mapping: the address space containing mmaps to be unmapped.
3550  * @holebegin: byte in first page to unmap, relative to the start of
3551  * the underlying file.  This will be rounded down to a PAGE_SIZE
3552  * boundary.  Note that this is different from truncate_pagecache(), which
3553  * must keep the partial page.  In contrast, we must get rid of
3554  * partial pages.
3555  * @holelen: size of prospective hole in bytes.  This will be rounded
3556  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3557  * end of the file.
3558  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3559  * but 0 when invalidating pagecache, don't throw away private data.
3560  */
3561 void unmap_mapping_range(struct address_space *mapping,
3562 		loff_t const holebegin, loff_t const holelen, int even_cows)
3563 {
3564 	pgoff_t hba = holebegin >> PAGE_SHIFT;
3565 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3566 
3567 	/* Check for overflow. */
3568 	if (sizeof(holelen) > sizeof(hlen)) {
3569 		long long holeend =
3570 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3571 		if (holeend & ~(long long)ULONG_MAX)
3572 			hlen = ULONG_MAX - hba + 1;
3573 	}
3574 
3575 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3576 }
3577 EXPORT_SYMBOL(unmap_mapping_range);
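
/*
 * Usage sketch (illustrative; compare truncate_pagecache() in
 * mm/truncate.c): truncation must unmap even private COWed pages beyond
 * the new end of file, and holelen == 0 extends the hole to EOF:
 *
 *	unmap_mapping_range(inode->i_mapping,
 *			    round_up(newsize, PAGE_SIZE), 0, 1);
 */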
3578 
3579 /*
3580  * Restore a potential device exclusive pte to a working pte entry
3581  */
3582 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3583 {
3584 	struct folio *folio = page_folio(vmf->page);
3585 	struct vm_area_struct *vma = vmf->vma;
3586 	struct mmu_notifier_range range;
3587 	vm_fault_t ret;
3588 
3589 	/*
3590 	 * We need a reference to lock the folio because we don't hold
3591 	 * the PTL so a racing thread can remove the device-exclusive
3592 	 * entry and unmap it. If the folio is free the entry must
3593 	 * have been removed already. If it happens to have already
3594 	 * been re-allocated after being freed all we do is lock and
3595 	 * unlock it.
3596 	 */
3597 	if (!folio_try_get(folio))
3598 		return 0;
3599 
3600 	ret = folio_lock_or_retry(folio, vmf);
3601 	if (ret) {
3602 		folio_put(folio);
3603 		return ret;
3604 	}
3605 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3606 				vma->vm_mm, vmf->address & PAGE_MASK,
3607 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3608 	mmu_notifier_invalidate_range_start(&range);
3609 
3610 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3611 				&vmf->ptl);
3612 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3613 		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
3614 
3615 	if (vmf->pte)
3616 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3617 	folio_unlock(folio);
3618 	folio_put(folio);
3619 
3620 	mmu_notifier_invalidate_range_end(&range);
3621 	return 0;
3622 }
3623 
3624 static inline bool should_try_to_free_swap(struct folio *folio,
3625 					   struct vm_area_struct *vma,
3626 					   unsigned int fault_flags)
3627 {
3628 	if (!folio_test_swapcache(folio))
3629 		return false;
3630 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
3631 	    folio_test_mlocked(folio))
3632 		return true;
3633 	/*
3634 	 * If we want to map a page that's in the swapcache writable, we
3635 	 * have to detect via the refcount if we're really the exclusive
3636 	 * user. Try freeing the swapcache to get rid of the swapcache
3637 	 * reference only in case it's likely that we'll be the exclusive user.
3638 	 */
3639 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3640 		folio_ref_count(folio) == 2;
3641 }
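
/*
 * Refcount ledger for the check above (illustrative): on this path the
 * faulting task holds one reference from the swapcache lookup and the
 * swapcache itself holds a second, so folio_ref_count() == 2 implies no
 * other mapping, GUP pin or cache reference exists and a write fault can
 * expect to become the exclusive owner once the swap slot is freed.
 */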
3642 
3643 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3644 {
3645 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3646 				       vmf->address, &vmf->ptl);
3647 	if (!vmf->pte)
3648 		return 0;
3649 	/*
3650 	 * Be careful so that we will only recover a special uffd-wp pte into a
3651 	 * none pte.  Otherwise it means the pte could have changed, so retry.
3652 	 *
3653 	 * This should also cover the case where e.g. the pte changed
3654 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
3655 	 * So an is_pte_marker() check alone is not enough to safely drop the pte.
3656 	 */
3657 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
3658 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3659 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3660 	return 0;
3661 }
3662 
3663 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3664 {
3665 	if (vma_is_anonymous(vmf->vma))
3666 		return do_anonymous_page(vmf);
3667 	else
3668 		return do_fault(vmf);
3669 }
3670 
3671 /*
3672  * This is actually a page-missing access, but with uffd-wp special pte
3673  * installed.  It means this pte was wr-protected before being unmapped.
3674  */
3675 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3676 {
3677 	/*
3678 	 * Just in case there're leftover special ptes even after the region
3679 	 * Just in case there are leftover special ptes even after the region
3680 	 */
3681 	if (unlikely(!userfaultfd_wp(vmf->vma)))
3682 		return pte_marker_clear(vmf);
3683 
3684 	return do_pte_missing(vmf);
3685 }
3686 
3687 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3688 {
3689 	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3690 	unsigned long marker = pte_marker_get(entry);
3691 
3692 	/*
3693 	 * PTE markers should never be empty.  If anything weird happened,
3694 	 * the best thing to do is to kill the process along with its mm.
3695 	 */
3696 	if (WARN_ON_ONCE(!marker))
3697 		return VM_FAULT_SIGBUS;
3698 
3699 	/* Takes priority over uffd-wp when the data is corrupted */
3700 	if (marker & PTE_MARKER_POISONED)
3701 		return VM_FAULT_HWPOISON;
3702 
3703 	if (pte_marker_entry_uffd_wp(entry))
3704 		return pte_marker_handle_uffd_wp(vmf);
3705 
3706 	/* This is an unknown pte marker */
3707 	return VM_FAULT_SIGBUS;
3708 }
3709 
3710 /*
3711  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3712  * but allow concurrent faults), and pte mapped but not yet locked.
3713  * We return with pte unmapped and unlocked.
3714  *
3715  * We return with the mmap_lock locked or unlocked in the same cases
3716  * as does filemap_fault().
3717  */
3718 vm_fault_t do_swap_page(struct vm_fault *vmf)
3719 {
3720 	struct vm_area_struct *vma = vmf->vma;
3721 	struct folio *swapcache, *folio = NULL;
3722 	struct page *page;
3723 	struct swap_info_struct *si = NULL;
3724 	rmap_t rmap_flags = RMAP_NONE;
3725 	bool exclusive = false;
3726 	swp_entry_t entry;
3727 	pte_t pte;
3728 	vm_fault_t ret = 0;
3729 	void *shadow = NULL;
3730 
3731 	if (!pte_unmap_same(vmf))
3732 		goto out;
3733 
3734 	entry = pte_to_swp_entry(vmf->orig_pte);
3735 	if (unlikely(non_swap_entry(entry))) {
3736 		if (is_migration_entry(entry)) {
3737 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3738 					     vmf->address);
3739 		} else if (is_device_exclusive_entry(entry)) {
3740 			vmf->page = pfn_swap_entry_to_page(entry);
3741 			ret = remove_device_exclusive_entry(vmf);
3742 		} else if (is_device_private_entry(entry)) {
3743 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3744 				/*
3745 				 * migrate_to_ram is not yet ready to operate
3746 				 * under VMA lock.
3747 				 */
3748 				vma_end_read(vma);
3749 				ret = VM_FAULT_RETRY;
3750 				goto out;
3751 			}
3752 
3753 			vmf->page = pfn_swap_entry_to_page(entry);
3754 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3755 					vmf->address, &vmf->ptl);
3756 			if (unlikely(!vmf->pte ||
3757 				     !pte_same(ptep_get(vmf->pte),
3758 							vmf->orig_pte)))
3759 				goto unlock;
3760 
3761 			/*
3762 			 * Get a page reference while we know the page can't be
3763 			 * freed.
3764 			 */
3765 			get_page(vmf->page);
3766 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3767 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3768 			put_page(vmf->page);
3769 		} else if (is_hwpoison_entry(entry)) {
3770 			ret = VM_FAULT_HWPOISON;
3771 		} else if (is_pte_marker_entry(entry)) {
3772 			ret = handle_pte_marker(vmf);
3773 		} else {
3774 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3775 			ret = VM_FAULT_SIGBUS;
3776 		}
3777 		goto out;
3778 	}
3779 
3780 	/* Prevent swapoff from happening to us. */
3781 	si = get_swap_device(entry);
3782 	if (unlikely(!si))
3783 		goto out;
3784 
3785 	folio = swap_cache_get_folio(entry, vma, vmf->address);
3786 	if (folio)
3787 		page = folio_file_page(folio, swp_offset(entry));
3788 	swapcache = folio;
3789 
3790 	if (!folio) {
3791 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3792 		    __swap_count(entry) == 1) {
3793 			/* skip swapcache */
3794 			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
3795 						vma, vmf->address, false);
3796 			if (folio) {
3797 				page = &folio->page;
3798 				__folio_set_locked(folio);
3799 				__folio_set_swapbacked(folio);
3800 
3801 				if (mem_cgroup_swapin_charge_folio(folio,
3802 							vma->vm_mm, GFP_KERNEL,
3803 							entry)) {
3804 					ret = VM_FAULT_OOM;
3805 					goto out_page;
3806 				}
3807 				mem_cgroup_swapin_uncharge_swap(entry);
3808 
3809 				shadow = get_shadow_from_swap_cache(entry);
3810 				if (shadow)
3811 					workingset_refault(folio, shadow);
3812 
3813 				folio_add_lru(folio);
3814 
3815 				/* To provide entry to swap_readpage() */
3816 				folio->swap = entry;
3817 				swap_readpage(page, true, NULL);
3818 				folio->private = NULL;
3819 			}
3820 		} else {
3821 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3822 						vmf);
3823 			if (page)
3824 				folio = page_folio(page);
3825 			swapcache = folio;
3826 		}
3827 
3828 		if (!folio) {
3829 			/*
3830 			 * Back out if somebody else faulted in this pte
3831 			 * while we released the pte lock.
3832 			 */
3833 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3834 					vmf->address, &vmf->ptl);
3835 			if (likely(vmf->pte &&
3836 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3837 				ret = VM_FAULT_OOM;
3838 			goto unlock;
3839 		}
3840 
3841 		/* Had to read the page from swap area: Major fault */
3842 		ret = VM_FAULT_MAJOR;
3843 		count_vm_event(PGMAJFAULT);
3844 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3845 	} else if (PageHWPoison(page)) {
3846 		/*
3847 		 * hwpoisoned dirty swapcache pages are kept for killing
3848 		 * owner processes (which may be unknown at hwpoison time)
3849 		 */
3850 		ret = VM_FAULT_HWPOISON;
3851 		goto out_release;
3852 	}
3853 
3854 	ret |= folio_lock_or_retry(folio, vmf);
3855 	if (ret & VM_FAULT_RETRY)
3856 		goto out_release;
3857 
3858 	if (swapcache) {
3859 		/*
3860 		 * Make sure folio_free_swap() or swapoff did not release the
3861 		 * swapcache from under us.  The page pin, and pte_same test
3862 		 * below, are not enough to exclude that.  Even if it is still
3863 		 * swapcache, we need to check that the page's swap has not
3864 		 * in the swapcache, we need to check that the page's swap has not
3865 		 */
3866 		if (unlikely(!folio_test_swapcache(folio) ||
3867 			     page_swap_entry(page).val != entry.val))
3868 			goto out_page;
3869 
3870 		/*
3871 		 * KSM sometimes has to copy on read faults, for example, if
3872 		 * page->index of !PageKSM() pages would be nonlinear inside the
3873 		 * anon VMA -- PageKSM() is lost on actual swapout.
3874 		 */
3875 		page = ksm_might_need_to_copy(page, vma, vmf->address);
3876 		if (unlikely(!page)) {
3877 			ret = VM_FAULT_OOM;
3878 			goto out_page;
3879 		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
3880 			ret = VM_FAULT_HWPOISON;
3881 			goto out_page;
3882 		}
3883 		folio = page_folio(page);
3884 
3885 		/*
3886 		 * If we want to map a page that's in the swapcache writable, we
3887 		 * have to detect via the refcount if we're really the exclusive
3888 		 * owner. Try removing the extra reference from the local LRU
3889 		 * caches if required.
3890 		 */
3891 		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
3892 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
3893 			lru_add_drain();
3894 	}
3895 
3896 	folio_throttle_swaprate(folio, GFP_KERNEL);
3897 
3898 	/*
3899 	 * Back out if somebody else already faulted in this pte.
3900 	 */
3901 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3902 			&vmf->ptl);
3903 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3904 		goto out_nomap;
3905 
3906 	if (unlikely(!folio_test_uptodate(folio))) {
3907 		ret = VM_FAULT_SIGBUS;
3908 		goto out_nomap;
3909 	}
3910 
3911 	/*
3912 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
3913 	 * must never point at an anonymous page in the swapcache that is
3914 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
3915 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
3916 	 * check after taking the PT lock and making sure that nobody
3917 	 * concurrently faulted in this page and set PG_anon_exclusive.
3918 	 */
3919 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
3920 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
3921 
3922 	/*
3923 	 * Check under PT lock (to protect against concurrent fork() sharing
3924 	 * the swap entry concurrently) for certainly exclusive pages.
3925 	 */
3926 	if (!folio_test_ksm(folio)) {
3927 		exclusive = pte_swp_exclusive(vmf->orig_pte);
3928 		if (folio != swapcache) {
3929 			/*
3930 			 * We have a fresh page that is not exposed to the
3931 			 * swapcache -> certainly exclusive.
3932 			 */
3933 			exclusive = true;
3934 		} else if (exclusive && folio_test_writeback(folio) &&
3935 			  data_race(si->flags & SWP_STABLE_WRITES)) {
3936 			/*
3937 			 * This is tricky: not all swap backends support
3938 			 * concurrent page modifications while under writeback.
3939 			 *
3940 			 * So if we stumble over such a page in the swapcache
3941 			 * we must not set the page exclusive, otherwise we can
3942 			 * map it writable without further checks and modify it
3943 			 * while still under writeback.
3944 			 *
3945 			 * For these problematic swap backends, simply drop the
3946 			 * exclusive marker: this is perfectly fine as we start
3947 			 * writeback only if we fully unmapped the page and
3948 			 * there are no unexpected references on the page after
3949 			 * unmapping succeeded. After fully unmapped, no
3950 			 * further GUP references (FOLL_GET and FOLL_PIN) can
3951 			 * appear, so dropping the exclusive marker and mapping
3952 			 * it only R/O is fine.
3953 			 */
3954 			exclusive = false;
3955 		}
3956 	}
3957 
3958 	/*
3959 	 * Some architectures may have to restore extra metadata to the page
3960 	 * when reading from swap. This metadata may be indexed by swap entry
3961 	 * so this must be called before swap_free().
3962 	 */
3963 	arch_swap_restore(entry, folio);
3964 
3965 	/*
3966 	 * Remove the swap entry and conditionally try to free up the swapcache.
3967 	 * We're already holding a reference on the page but haven't mapped it
3968 	 * yet.
3969 	 */
3970 	swap_free(entry);
3971 	if (should_try_to_free_swap(folio, vma, vmf->flags))
3972 		folio_free_swap(folio);
3973 
3974 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
3975 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
3976 	pte = mk_pte(page, vma->vm_page_prot);
3977 
3978 	/*
3979 	 * Same logic as in do_wp_page(); however, optimize for pages that are
3980 	 * certainly not shared either because we just allocated them without
3981 	 * exposing them to the swapcache or because the swap entry indicates
3982 	 * exclusivity.
3983 	 */
3984 	if (!folio_test_ksm(folio) &&
3985 	    (exclusive || folio_ref_count(folio) == 1)) {
3986 		if (vmf->flags & FAULT_FLAG_WRITE) {
3987 			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3988 			vmf->flags &= ~FAULT_FLAG_WRITE;
3989 		}
3990 		rmap_flags |= RMAP_EXCLUSIVE;
3991 	}
3992 	flush_icache_page(vma, page);
3993 	if (pte_swp_soft_dirty(vmf->orig_pte))
3994 		pte = pte_mksoft_dirty(pte);
3995 	if (pte_swp_uffd_wp(vmf->orig_pte))
3996 		pte = pte_mkuffd_wp(pte);
3997 	vmf->orig_pte = pte;
3998 
3999 	/* ksm created a completely new copy */
4000 	if (unlikely(folio != swapcache && swapcache)) {
4001 		page_add_new_anon_rmap(page, vma, vmf->address);
4002 		folio_add_lru_vma(folio, vma);
4003 	} else {
4004 		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
4005 	}
4006 
4007 	VM_BUG_ON(!folio_test_anon(folio) ||
4008 			(pte_write(pte) && !PageAnonExclusive(page)));
4009 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4010 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
4011 
4012 	folio_unlock(folio);
4013 	if (folio != swapcache && swapcache) {
4014 		/*
4015 		 * Hold the lock to prevent the swap entry from being reused
4016 		 * until we take the PT lock for the pte_same() check
4017 		 * (to avoid false positives from pte_same). For
4018 		 * further safety, release the lock only after the swap_free,
4019 		 * so that the swap count won't change under a
4020 		 * parallel locked swapcache.
4021 		 */
4022 		folio_unlock(swapcache);
4023 		folio_put(swapcache);
4024 	}
4025 
4026 	if (vmf->flags & FAULT_FLAG_WRITE) {
4027 		ret |= do_wp_page(vmf);
4028 		if (ret & VM_FAULT_ERROR)
4029 			ret &= VM_FAULT_ERROR;
4030 		goto out;
4031 	}
4032 
4033 	/* No need to invalidate - it was non-present before */
4034 	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4035 unlock:
4036 	if (vmf->pte)
4037 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4038 out:
4039 	if (si)
4040 		put_swap_device(si);
4041 	return ret;
4042 out_nomap:
4043 	if (vmf->pte)
4044 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4045 out_page:
4046 	folio_unlock(folio);
4047 out_release:
4048 	folio_put(folio);
4049 	if (folio != swapcache && swapcache) {
4050 		folio_unlock(swapcache);
4051 		folio_put(swapcache);
4052 	}
4053 	if (si)
4054 		put_swap_device(si);
4055 	return ret;
4056 }
4057 
4058 /*
4059  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4060  * but allow concurrent faults), and pte mapped but not yet locked.
4061  * We return with mmap_lock still held, but pte unmapped and unlocked.
4062  */
4063 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4064 {
4065 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
4066 	struct vm_area_struct *vma = vmf->vma;
4067 	struct folio *folio;
4068 	vm_fault_t ret = 0;
4069 	pte_t entry;
4070 
4071 	/* File mapping without ->vm_ops ? */
4072 	if (vma->vm_flags & VM_SHARED)
4073 		return VM_FAULT_SIGBUS;
4074 
4075 	/*
4076 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4077 	 * be distinguished from a transient failure of pte_offset_map().
4078 	 */
4079 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4080 		return VM_FAULT_OOM;
4081 
4082 	/* Use the zero-page for reads */
4083 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4084 			!mm_forbids_zeropage(vma->vm_mm)) {
4085 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4086 						vma->vm_page_prot));
4087 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4088 				vmf->address, &vmf->ptl);
4089 		if (!vmf->pte)
4090 			goto unlock;
4091 		if (vmf_pte_changed(vmf)) {
4092 			update_mmu_tlb(vma, vmf->address, vmf->pte);
4093 			goto unlock;
4094 		}
4095 		ret = check_stable_address_space(vma->vm_mm);
4096 		if (ret)
4097 			goto unlock;
4098 		/* Deliver the page fault to userland, check inside PT lock */
4099 		if (userfaultfd_missing(vma)) {
4100 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4101 			return handle_userfault(vmf, VM_UFFD_MISSING);
4102 		}
4103 		goto setpte;
4104 	}
4105 
4106 	/* Allocate our own private page. */
4107 	if (unlikely(anon_vma_prepare(vma)))
4108 		goto oom;
4109 	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4110 	if (!folio)
4111 		goto oom;
4112 
4113 	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
4114 		goto oom_free_page;
4115 	folio_throttle_swaprate(folio, GFP_KERNEL);
4116 
4117 	/*
4118 	 * The memory barrier inside __folio_mark_uptodate makes sure that
4119 	 * preceding stores to the page contents become visible before
4120 	 * the set_pte_at() write.
4121 	 */
4122 	__folio_mark_uptodate(folio);
4123 
4124 	entry = mk_pte(&folio->page, vma->vm_page_prot);
4125 	entry = pte_sw_mkyoung(entry);
4126 	if (vma->vm_flags & VM_WRITE)
4127 		entry = pte_mkwrite(pte_mkdirty(entry));
4128 
4129 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4130 			&vmf->ptl);
4131 	if (!vmf->pte)
4132 		goto release;
4133 	if (vmf_pte_changed(vmf)) {
4134 		update_mmu_tlb(vma, vmf->address, vmf->pte);
4135 		goto release;
4136 	}
4137 
4138 	ret = check_stable_address_space(vma->vm_mm);
4139 	if (ret)
4140 		goto release;
4141 
4142 	/* Deliver the page fault to userland, check inside PT lock */
4143 	if (userfaultfd_missing(vma)) {
4144 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4145 		folio_put(folio);
4146 		return handle_userfault(vmf, VM_UFFD_MISSING);
4147 	}
4148 
4149 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
4150 	folio_add_new_anon_rmap(folio, vma, vmf->address);
4151 	folio_add_lru_vma(folio, vma);
4152 setpte:
4153 	if (uffd_wp)
4154 		entry = pte_mkuffd_wp(entry);
4155 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4156 
4157 	/* No need to invalidate - it was non-present before */
4158 	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4159 unlock:
4160 	if (vmf->pte)
4161 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4162 	return ret;
4163 release:
4164 	folio_put(folio);
4165 	goto unlock;
4166 oom_free_page:
4167 	folio_put(folio);
4168 oom:
4169 	return VM_FAULT_OOM;
4170 }
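
/*
 * Behaviour sketch as seen from userspace (illustrative):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char c = *p;	// read fault: zero page mapped read-only
 *	*p = 1;		// write fault: private zeroed page installed
 *
 * The load takes the zero-page branch above; the subsequent store is a
 * write-protect fault resolved by do_wp_page(), while a store as the very
 * first access allocates the anonymous folio directly.
 */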
4171 
4172 /*
4173  * The mmap_lock must have been held on entry, and may have been
4174  * released depending on flags and vma->vm_ops->fault() return value.
4175  * See filemap_fault() and __lock_page_retry().
4176  */
4177 static vm_fault_t __do_fault(struct vm_fault *vmf)
4178 {
4179 	struct vm_area_struct *vma = vmf->vma;
4180 	vm_fault_t ret;
4181 
4182 	/*
4183 	 * Preallocate pte before we take page_lock because this might lead to
4184 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4185 	 *				lock_page(A)
4186 	 *				SetPageWriteback(A)
4187 	 *				unlock_page(A)
4188 	 * lock_page(B)
4189 	 *				lock_page(B)
4190 	 * pte_alloc_one
4191 	 *   shrink_page_list
4192 	 *     wait_on_page_writeback(A)
4193 	 *				SetPageWriteback(B)
4194 	 *				unlock_page(B)
4195 	 *				# flush A, B to clear the writeback
4196 	 */
4197 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4198 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4199 		if (!vmf->prealloc_pte)
4200 			return VM_FAULT_OOM;
4201 	}
4202 
4203 	ret = vma->vm_ops->fault(vmf);
4204 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4205 			    VM_FAULT_DONE_COW)))
4206 		return ret;
4207 
4208 	if (unlikely(PageHWPoison(vmf->page))) {
4209 		struct page *page = vmf->page;
4210 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4211 		if (ret & VM_FAULT_LOCKED) {
4212 			if (page_mapped(page))
4213 				unmap_mapping_pages(page_mapping(page),
4214 						    page->index, 1, false);
4215 			/* Retry if a clean page was removed from the cache. */
4216 			if (invalidate_inode_page(page))
4217 				poisonret = VM_FAULT_NOPAGE;
4218 			unlock_page(page);
4219 		}
4220 		put_page(page);
4221 		vmf->page = NULL;
4222 		return poisonret;
4223 	}
4224 
4225 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4226 		lock_page(vmf->page);
4227 	else
4228 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4229 
4230 	return ret;
4231 }
4232 
4233 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4234 static void deposit_prealloc_pte(struct vm_fault *vmf)
4235 {
4236 	struct vm_area_struct *vma = vmf->vma;
4237 
4238 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4239 	/*
4240 	 * We are going to consume the prealloc table,
4241 	 * count that as nr_ptes.
4242 	 */
4243 	mm_inc_nr_ptes(vma->vm_mm);
4244 	vmf->prealloc_pte = NULL;
4245 }
4246 
4247 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4248 {
4249 	struct vm_area_struct *vma = vmf->vma;
4250 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4251 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4252 	pmd_t entry;
4253 	vm_fault_t ret = VM_FAULT_FALLBACK;
4254 
4255 	if (!transhuge_vma_suitable(vma, haddr))
4256 		return ret;
4257 
4258 	page = compound_head(page);
4259 	if (compound_order(page) != HPAGE_PMD_ORDER)
4260 		return ret;
4261 
4262 	/*
4263 	 * Just back off if any subpage of a THP is corrupted; otherwise the
4264 	 * corrupted page may be mapped by a PMD silently and escape the
4265 	 * check.  Such a THP can only be PTE-mapped.  Access to
4266 	 * the corrupted subpage should trigger SIGBUS as expected.
4267 	 */
4268 	if (unlikely(PageHasHWPoisoned(page)))
4269 		return ret;
4270 
4271 	/*
4272 	 * Archs like ppc64 need additional space to store information
4273 	 * related to pte entry. Use the preallocated table for that.
4274 	 */
4275 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4276 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4277 		if (!vmf->prealloc_pte)
4278 			return VM_FAULT_OOM;
4279 	}
4280 
4281 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4282 	if (unlikely(!pmd_none(*vmf->pmd)))
4283 		goto out;
4284 
4285 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
4286 
4287 	entry = mk_huge_pmd(page, vma->vm_page_prot);
4288 	if (write)
4289 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4290 
4291 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4292 	page_add_file_rmap(page, vma, true);
4293 
4294 	/*
4295 	 * deposit and withdraw with pmd lock held
4296 	 */
4297 	if (arch_needs_pgtable_deposit())
4298 		deposit_prealloc_pte(vmf);
4299 
4300 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4301 
4302 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4303 
4304 	/* fault is handled */
4305 	ret = 0;
4306 	count_vm_event(THP_FILE_MAPPED);
4307 out:
4308 	spin_unlock(vmf->ptl);
4309 	return ret;
4310 }
4311 #else
4312 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4313 {
4314 	return VM_FAULT_FALLBACK;
4315 }
4316 #endif
4317 
4318 /**
4319  * set_pte_range - Set a range of PTEs to point to pages in a folio.
4320  * @vmf: Fault description.
4321  * @folio: The folio that contains @page.
4322  * @page: The first page to create a PTE for.
4323  * @nr: The number of PTEs to create.
4324  * @addr: The first address to create a PTE for.
4325  */
4326 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4327 		struct page *page, unsigned int nr, unsigned long addr)
4328 {
4329 	struct vm_area_struct *vma = vmf->vma;
4330 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
4331 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4332 	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
4333 	pte_t entry;
4334 
4335 	flush_icache_pages(vma, page, nr);
4336 	entry = mk_pte(page, vma->vm_page_prot);
4337 
4338 	if (prefault && arch_wants_old_prefaulted_pte())
4339 		entry = pte_mkold(entry);
4340 	else
4341 		entry = pte_sw_mkyoung(entry);
4342 
4343 	if (write)
4344 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4345 	if (unlikely(uffd_wp))
4346 		entry = pte_mkuffd_wp(entry);
4347 	/* copy-on-write page */
4348 	if (write && !(vma->vm_flags & VM_SHARED)) {
4349 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
4350 		VM_BUG_ON_FOLIO(nr != 1, folio);
4351 		folio_add_new_anon_rmap(folio, vma, addr);
4352 		folio_add_lru_vma(folio, vma);
4353 	} else {
4354 		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
4355 		folio_add_file_rmap_range(folio, page, nr, vma, false);
4356 	}
4357 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4358 
4359 	/* no need to invalidate: a not-present page won't be cached */
4360 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
4361 }
4362 
4363 static bool vmf_pte_changed(struct vm_fault *vmf)
4364 {
4365 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
4366 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
4367 
4368 	return !pte_none(ptep_get(vmf->pte));
4369 }
4370 
4371 /**
4372  * finish_fault - finish page fault once we have prepared the page to fault
4373  *
4374  * @vmf: structure describing the fault
4375  *
4376  * This function handles all that is needed to finish a page fault once the
4377  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4378  * given page, adds reverse page mapping, handles memcg charges and LRU
4379  * addition.
4380  *
4381  * The function expects the page to be locked and on success it consumes a
4382  * reference of a page being mapped (for the PTE which maps it).
4383  *
4384  * Return: %0 on success, %VM_FAULT_ code in case of error.
4385  */
4386 vm_fault_t finish_fault(struct vm_fault *vmf)
4387 {
4388 	struct vm_area_struct *vma = vmf->vma;
4389 	struct page *page;
4390 	vm_fault_t ret;
4391 
4392 	/* Did we COW the page? */
4393 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4394 		page = vmf->cow_page;
4395 	else
4396 		page = vmf->page;
4397 
4398 	/*
4399 	 * check even for read faults because we might have lost our CoWed
4400 	 * Check even for read faults, because we might have lost our CoWed
4401 	 */
4402 	if (!(vma->vm_flags & VM_SHARED)) {
4403 		ret = check_stable_address_space(vma->vm_mm);
4404 		if (ret)
4405 			return ret;
4406 	}
4407 
4408 	if (pmd_none(*vmf->pmd)) {
4409 		if (PageTransCompound(page)) {
4410 			ret = do_set_pmd(vmf, page);
4411 			if (ret != VM_FAULT_FALLBACK)
4412 				return ret;
4413 		}
4414 
4415 		if (vmf->prealloc_pte)
4416 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4417 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4418 			return VM_FAULT_OOM;
4419 	}
4420 
4421 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4422 				      vmf->address, &vmf->ptl);
4423 	if (!vmf->pte)
4424 		return VM_FAULT_NOPAGE;
4425 
4426 	/* Re-check under ptl */
4427 	if (likely(!vmf_pte_changed(vmf))) {
4428 		struct folio *folio = page_folio(page);
4429 
4430 		set_pte_range(vmf, folio, page, 1, vmf->address);
4431 		ret = 0;
4432 	} else {
4433 		update_mmu_tlb(vma, vmf->address, vmf->pte);
4434 		ret = VM_FAULT_NOPAGE;
4435 	}
4436 
4437 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4438 	return ret;
4439 }
4440 
4441 static unsigned long fault_around_pages __read_mostly =
4442 	65536 >> PAGE_SHIFT;
4443 
4444 #ifdef CONFIG_DEBUG_FS
4445 static int fault_around_bytes_get(void *data, u64 *val)
4446 {
4447 	*val = fault_around_pages << PAGE_SHIFT;
4448 	return 0;
4449 }
4450 
4451 /*
4452  * fault_around_bytes must be rounded down to the nearest page order as it's
4453  * what do_fault_around() expects to see.
4454  */
4455 static int fault_around_bytes_set(void *data, u64 val)
4456 {
4457 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4458 		return -EINVAL;
4459 
4460 	/*
4461 	 * The minimum value is 1 page; however, this results in no fault-around
4462 	 * at all. See should_fault_around().
4463 	 */
4464 	fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL);
4465 
4466 	return 0;
4467 }
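
/*
 * Worked example (illustrative, assuming 4KiB pages and
 * PTRS_PER_PTE == 512): writing 200000 to fault_around_bytes stores
 *
 *	rounddown_pow_of_two(200000) >> PAGE_SHIFT = 131072 >> 12 = 32 pages
 *
 * A request of more than PTRS_PER_PTE full pages (2MiB here) fails with
 * -EINVAL, and anything below one page degenerates to
 * fault_around_pages == 1, i.e. no fault-around.
 */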
4468 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4469 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4470 
4471 static int __init fault_around_debugfs(void)
4472 {
4473 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4474 				   &fault_around_bytes_fops);
4475 	return 0;
4476 }
4477 late_initcall(fault_around_debugfs);
4478 #endif
4479 
4480 /*
4481  * do_fault_around() tries to map a few pages around the fault address. The hope
4482  * is that the pages will be needed soon and this will lower the number of
4483  * faults to handle.
4484  *
4485  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4486  * not ready to be mapped: not up-to-date, locked, etc.
4487  *
4488  * This function doesn't cross VMA or page table boundaries, in order to call
4489  * map_pages() and acquire a PTE lock only once.
4490  *
4491  * fault_around_pages defines how many pages we'll try to map.
4492  * do_fault_around() expects it to be set to a power of two less than or equal
4493  * to PTRS_PER_PTE.
4494  *
4495  * The virtual address of the area that we map is naturally aligned to
4496  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
4497  * (and therefore to page order).  This way it's easier to guarantee
4498  * that we don't cross page table boundaries.
4499  */
4500 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4501 {
4502 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
4503 	pgoff_t pte_off = pte_index(vmf->address);
4504 	/* The page offset of vmf->address within the VMA. */
4505 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4506 	pgoff_t from_pte, to_pte;
4507 	vm_fault_t ret;
4508 
4509 	/* The PTE offset of the start address, clamped to the VMA. */
4510 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
4511 		       pte_off - min(pte_off, vma_off));
4512 
4513 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
4514 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
4515 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
4516 
4517 	if (pmd_none(*vmf->pmd)) {
4518 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4519 		if (!vmf->prealloc_pte)
4520 			return VM_FAULT_OOM;
4521 	}
4522 
4523 	rcu_read_lock();
4524 	ret = vmf->vma->vm_ops->map_pages(vmf,
4525 			vmf->pgoff + from_pte - pte_off,
4526 			vmf->pgoff + to_pte - pte_off);
4527 	rcu_read_unlock();
4528 
4529 	return ret;
4530 }
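
/*
 * Worked example of the clamping above (illustrative, 4KiB pages,
 * PTRS_PER_PTE == 512, fault_around_pages == 16): a fault with
 * pte_off == 100, 250 pages into a 300-page VMA (vma_off == 250), gives
 *
 *	from_pte = max(ALIGN_DOWN(100, 16), 100 - min(100, 250)) = 96
 *	to_pte   = min3(96 + 16, 512, 100 + 300 - 250) - 1 = 111
 *
 * so map_pages() covers the naturally aligned window of PTEs 96..111.
 * Were the fault only 2 pages past the VMA start (vma_off == 2), from_pte
 * would instead be clamped up to 98.
 */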
4531 
4532 /* Return true if we should do read fault-around, false otherwise */
4533 static inline bool should_fault_around(struct vm_fault *vmf)
4534 {
4535 	/* No ->map_pages?  No way to fault around... */
4536 	if (!vmf->vma->vm_ops->map_pages)
4537 		return false;
4538 
4539 	if (uffd_disable_fault_around(vmf->vma))
4540 		return false;
4541 
4542 	/* A single page implies no faulting 'around' at all. */
4543 	return fault_around_pages > 1;
4544 }
4545 
4546 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4547 {
4548 	vm_fault_t ret = 0;
4549 	struct folio *folio;
4550 
4551 	/*
4552 	 * Let's call ->map_pages() first and use ->fault() as a fallback
4553 	 * if the page at this offset is not ready to be mapped (cold cache or
4554 	 * something).
4555 	 */
4556 	if (should_fault_around(vmf)) {
4557 		ret = do_fault_around(vmf);
4558 		if (ret)
4559 			return ret;
4560 	}
4561 
4562 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4563 		vma_end_read(vmf->vma);
4564 		return VM_FAULT_RETRY;
4565 	}
4566 
4567 	ret = __do_fault(vmf);
4568 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4569 		return ret;
4570 
4571 	ret |= finish_fault(vmf);
4572 	folio = page_folio(vmf->page);
4573 	folio_unlock(folio);
4574 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4575 		folio_put(folio);
4576 	return ret;
4577 }
4578 
4579 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4580 {
4581 	struct vm_area_struct *vma = vmf->vma;
4582 	vm_fault_t ret;
4583 
4584 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4585 		vma_end_read(vma);
4586 		return VM_FAULT_RETRY;
4587 	}
4588 
4589 	if (unlikely(anon_vma_prepare(vma)))
4590 		return VM_FAULT_OOM;
4591 
4592 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4593 	if (!vmf->cow_page)
4594 		return VM_FAULT_OOM;
4595 
4596 	if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4597 				GFP_KERNEL)) {
4598 		put_page(vmf->cow_page);
4599 		return VM_FAULT_OOM;
4600 	}
4601 	folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
4602 
4603 	ret = __do_fault(vmf);
4604 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4605 		goto uncharge_out;
4606 	if (ret & VM_FAULT_DONE_COW)
4607 		return ret;
4608 
4609 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4610 	__SetPageUptodate(vmf->cow_page);
4611 
4612 	ret |= finish_fault(vmf);
4613 	unlock_page(vmf->page);
4614 	put_page(vmf->page);
4615 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4616 		goto uncharge_out;
4617 	return ret;
4618 uncharge_out:
4619 	put_page(vmf->cow_page);
4620 	return ret;
4621 }
4622 
4623 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4624 {
4625 	struct vm_area_struct *vma = vmf->vma;
4626 	vm_fault_t ret, tmp;
4627 	struct folio *folio;
4628 
4629 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4630 		vma_end_read(vma);
4631 		return VM_FAULT_RETRY;
4632 	}
4633 
4634 	ret = __do_fault(vmf);
4635 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4636 		return ret;
4637 
4638 	folio = page_folio(vmf->page);
4639 
4640 	/*
4641 	 * Check if the backing address space wants to know that the page is
4642 	 * about to become writable
4643 	 */
4644 	if (vma->vm_ops->page_mkwrite) {
4645 		folio_unlock(folio);
4646 		tmp = do_page_mkwrite(vmf, folio);
4647 		if (unlikely(!tmp ||
4648 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4649 			folio_put(folio);
4650 			return tmp;
4651 		}
4652 	}
4653 
4654 	ret |= finish_fault(vmf);
4655 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4656 					VM_FAULT_RETRY))) {
4657 		folio_unlock(folio);
4658 		folio_put(folio);
4659 		return ret;
4660 	}
4661 
4662 	ret |= fault_dirty_shared_page(vmf);
4663 	return ret;
4664 }
4665 
4666 /*
4667  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4668  * but allow concurrent faults).
4669  * The mmap_lock may have been released depending on flags and our
4670  * return value.  See filemap_fault() and __folio_lock_or_retry().
4671  * If mmap_lock is released, vma may become invalid (for example
4672  * by other thread calling munmap()).
4673  */
4674 static vm_fault_t do_fault(struct vm_fault *vmf)
4675 {
4676 	struct vm_area_struct *vma = vmf->vma;
4677 	struct mm_struct *vm_mm = vma->vm_mm;
4678 	vm_fault_t ret;
4679 
4680 	/*
4681 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4682 	 */
4683 	if (!vma->vm_ops->fault) {
4684 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4685 					       vmf->address, &vmf->ptl);
4686 		if (unlikely(!vmf->pte))
4687 			ret = VM_FAULT_SIGBUS;
4688 		else {
4689 			/*
4690 			 * Make sure this is not a temporary clearing of pte
4691 			 * by holding ptl and checking again. An R/M/W update
4692 			 * of the pte involves taking the ptl, clearing the pte
4693 			 * so that there is no concurrent modification by
4694 			 * hardware, and then writing the update.
4695 			 */
4696 			if (unlikely(pte_none(ptep_get(vmf->pte))))
4697 				ret = VM_FAULT_SIGBUS;
4698 			else
4699 				ret = VM_FAULT_NOPAGE;
4700 
4701 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4702 		}
4703 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4704 		ret = do_read_fault(vmf);
4705 	else if (!(vma->vm_flags & VM_SHARED))
4706 		ret = do_cow_fault(vmf);
4707 	else
4708 		ret = do_shared_fault(vmf);
4709 
4710 	/* preallocated pagetable is unused: free it */
4711 	if (vmf->prealloc_pte) {
4712 		pte_free(vm_mm, vmf->prealloc_pte);
4713 		vmf->prealloc_pte = NULL;
4714 	}
4715 	return ret;
4716 }
4717 
4718 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4719 		      unsigned long addr, int page_nid, int *flags)
4720 {
4721 	get_page(page);
4722 
4723 	/* Record the current PID accessing the VMA */
4724 	vma_set_access_pid_bit(vma);
4725 
4726 	count_vm_numa_event(NUMA_HINT_FAULTS);
4727 	if (page_nid == numa_node_id()) {
4728 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4729 		*flags |= TNF_FAULT_LOCAL;
4730 	}
4731 
4732 	return mpol_misplaced(page, vma, addr);
4733 }
4734 
4735 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4736 {
4737 	struct vm_area_struct *vma = vmf->vma;
4738 	struct page *page = NULL;
4739 	int page_nid = NUMA_NO_NODE;
4740 	bool writable = false;
4741 	int last_cpupid;
4742 	int target_nid;
4743 	pte_t pte, old_pte;
4744 	int flags = 0;
4745 
4746 	/*
4747 	 * The "pte" at this point cannot be used safely without
4748 	 * validation through pte_unmap_same(). It's of NUMA type but
4749 	 * the pfn may be garbage if the read is non-atomic.
4750 	 */
4751 	spin_lock(vmf->ptl);
4752 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
4753 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4754 		goto out;
4755 	}
4756 
4757 	/* Get the normal PTE  */
4758 	old_pte = ptep_get(vmf->pte);
4759 	pte = pte_modify(old_pte, vma->vm_page_prot);
4760 
4761 	/*
4762 	 * Detect now whether the PTE could be writable; this information
4763 	 * is only valid while holding the PT lock.
4764 	 */
4765 	writable = pte_write(pte);
4766 	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
4767 	    can_change_pte_writable(vma, vmf->address, pte))
4768 		writable = true;
4769 
4770 	page = vm_normal_page(vma, vmf->address, pte);
4771 	if (!page || is_zone_device_page(page))
4772 		goto out_map;
4773 
4774 	/* TODO: handle PTE-mapped THP */
4775 	if (PageCompound(page))
4776 		goto out_map;
4777 
4778 	/*
4779 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4780 	 * much anyway since they can be in shared cache state. This misses
4781 	 * the case where a mapping is writable but the process never writes
4782 	 * to it but pte_write gets cleared during protection updates and
4783 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4784 	 * background writeback, dirty balancing and application behaviour.
4785 	 */
4786 	if (!writable)
4787 		flags |= TNF_NO_GROUP;
4788 
4789 	/*
4790 	 * Flag if the page is shared between multiple address spaces. This
4791 	 * is later used when determining whether to group tasks together
4792 	 */
4793 	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4794 		flags |= TNF_SHARED;
4795 
4796 	page_nid = page_to_nid(page);
4797 	/*
4798 	 * In memory tiering mode, the cpupid of a slow-memory page is used
4799 	 * to record the page access time, so use the default value.
4800 	 */
4801 	if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4802 	    !node_is_toptier(page_nid))
4803 		last_cpupid = (-1 & LAST_CPUPID_MASK);
4804 	else
4805 		last_cpupid = page_cpupid_last(page);
4806 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4807 			&flags);
4808 	if (target_nid == NUMA_NO_NODE) {
4809 		put_page(page);
4810 		goto out_map;
4811 	}
4812 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4813 	writable = false;
4814 
4815 	/* Migrate to the requested node */
4816 	if (migrate_misplaced_page(page, vma, target_nid)) {
4817 		page_nid = target_nid;
4818 		flags |= TNF_MIGRATED;
4819 	} else {
4820 		flags |= TNF_MIGRATE_FAIL;
4821 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4822 					       vmf->address, &vmf->ptl);
4823 		if (unlikely(!vmf->pte))
4824 			goto out;
4825 		if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
4826 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4827 			goto out;
4828 		}
4829 		goto out_map;
4830 	}
4831 
4832 out:
4833 	if (page_nid != NUMA_NO_NODE)
4834 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4835 	return 0;
4836 out_map:
4837 	/*
4838 	 * Make it present again.  Depending on how the arch implements
4839 	 * non-accessible ptes, some may still allow access by kernel mode.
4840 	 */
4841 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4842 	pte = pte_modify(old_pte, vma->vm_page_prot);
4843 	pte = pte_mkyoung(pte);
4844 	if (writable)
4845 		pte = pte_mkwrite(pte);
4846 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4847 	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4848 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4849 	goto out;
4850 }
4851 
4852 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4853 {
4854 	struct vm_area_struct *vma = vmf->vma;
4855 	if (vma_is_anonymous(vma))
4856 		return do_huge_pmd_anonymous_page(vmf);
4857 	if (vma->vm_ops->huge_fault)
4858 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
4859 	return VM_FAULT_FALLBACK;
4860 }
4861 
4862 /* `inline' is required to avoid gcc 4.1.2 build error */
4863 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4864 {
4865 	struct vm_area_struct *vma = vmf->vma;
4866 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
4867 	vm_fault_t ret;
4868 
4869 	if (vma_is_anonymous(vma)) {
4870 		if (likely(!unshare) &&
4871 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd))
4872 			return handle_userfault(vmf, VM_UFFD_WP);
4873 		return do_huge_pmd_wp_page(vmf);
4874 	}
4875 
4876 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4877 		if (vma->vm_ops->huge_fault) {
4878 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
4879 			if (!(ret & VM_FAULT_FALLBACK))
4880 				return ret;
4881 		}
4882 	}
4883 
4884 	/* COW or write-notify handled on pte level: split pmd. */
4885 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
4886 
4887 	return VM_FAULT_FALLBACK;
4888 }
4889 
4890 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4891 {
4892 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4893 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4894 	struct vm_area_struct *vma = vmf->vma;
4895 	/* No support for anonymous transparent PUD pages yet */
4896 	if (vma_is_anonymous(vma))
4897 		return VM_FAULT_FALLBACK;
4898 	if (vma->vm_ops->huge_fault)
4899 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
4900 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4901 	return VM_FAULT_FALLBACK;
4902 }
4903 
4904 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4905 {
4906 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4907 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4908 	struct vm_area_struct *vma = vmf->vma;
4909 	vm_fault_t ret;
4910 
4911 	/* No support for anonymous transparent PUD pages yet */
4912 	if (vma_is_anonymous(vma))
4913 		goto split;
4914 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4915 		if (vma->vm_ops->huge_fault) {
4916 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
4917 			if (!(ret & VM_FAULT_FALLBACK))
4918 				return ret;
4919 		}
4920 	}
4921 split:
4922 	/* COW or write-notify not handled on PUD level: split pud. */
4923 	__split_huge_pud(vma, vmf->pud, vmf->address);
4924 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4925 	return VM_FAULT_FALLBACK;
4926 }
4927 
4928 /*
4929  * These routines also need to handle stuff like marking pages dirty
4930  * and/or accessed for architectures that don't do it in hardware (most
4931  * RISC architectures).  The early dirtying is also good on the i386.
4932  *
4933  * There is also a hook called "update_mmu_cache()" that architectures
4934  * with external mmu caches can use to update those (ie the Sparc or
4935  * PowerPC hashed page tables that act as extended TLBs).
4936  *
4937  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4938  * concurrent faults).
4939  *
4940  * The mmap_lock may have been released depending on flags and our return value.
4941  * See filemap_fault() and __folio_lock_or_retry().
4942  */
4943 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4944 {
4945 	pte_t entry;
4946 
4947 	if (unlikely(pmd_none(*vmf->pmd))) {
4948 		/*
4949 		 * Leave __pte_alloc() until later: because vm_ops->fault may
4950 		 * want to allocate huge page, and if we expose page table
4951 		 * for an instant, it will be difficult to retract from
4952 		 * concurrent faults and from rmap lookups.
4953 		 */
4954 		vmf->pte = NULL;
4955 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
4956 	} else {
4957 		/*
4958 		 * A regular pmd is established and it can't morph into a huge
4959 		 * pmd by anon khugepaged, since that takes mmap_lock in write
4960 		 * mode; but shmem or file collapse to THP could still morph
4961 		 * it into a huge pmd: just retry later if so.
4962 		 */
4963 		vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
4964 						 vmf->address, &vmf->ptl);
4965 		if (unlikely(!vmf->pte))
4966 			return 0;
4967 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
4968 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
4969 
4970 		if (pte_none(vmf->orig_pte)) {
4971 			pte_unmap(vmf->pte);
4972 			vmf->pte = NULL;
4973 		}
4974 	}
4975 
4976 	if (!vmf->pte)
4977 		return do_pte_missing(vmf);
4978 
4979 	if (!pte_present(vmf->orig_pte))
4980 		return do_swap_page(vmf);
4981 
4982 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4983 		return do_numa_page(vmf);
4984 
4985 	spin_lock(vmf->ptl);
4986 	entry = vmf->orig_pte;
4987 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
4988 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4989 		goto unlock;
4990 	}
4991 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
4992 		if (!pte_write(entry))
4993 			return do_wp_page(vmf);
4994 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
4995 			entry = pte_mkdirty(entry);
4996 	}
4997 	entry = pte_mkyoung(entry);
4998 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4999 				vmf->flags & FAULT_FLAG_WRITE)) {
5000 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5001 				vmf->pte, 1);
5002 	} else {
5003 		/* Skip spurious TLB flush for retried page fault */
5004 		if (vmf->flags & FAULT_FLAG_TRIED)
5005 			goto unlock;
5006 		/*
5007 		 * This is needed only for protection faults but the arch code
5008 		 * is not yet telling us if this is a protection fault or not.
5009 		 * This still avoids useless tlb flushes for .text page faults
5010 		 * with threads.
5011 		 */
5012 		if (vmf->flags & FAULT_FLAG_WRITE)
5013 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5014 						     vmf->pte);
5015 	}
5016 unlock:
5017 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5018 	return 0;
5019 }
5020 
5021 /*
5022  * On entry, we hold either the VMA lock or the mmap_lock
5023  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
5024  * the result, the mmap_lock is not held on exit.  See filemap_fault()
5025  * and __folio_lock_or_retry().
5026  */
5027 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5028 		unsigned long address, unsigned int flags)
5029 {
5030 	struct vm_fault vmf = {
5031 		.vma = vma,
5032 		.address = address & PAGE_MASK,
5033 		.real_address = address,
5034 		.flags = flags,
5035 		.pgoff = linear_page_index(vma, address),
5036 		.gfp_mask = __get_fault_gfp_mask(vma),
5037 	};
5038 	struct mm_struct *mm = vma->vm_mm;
5039 	unsigned long vm_flags = vma->vm_flags;
5040 	pgd_t *pgd;
5041 	p4d_t *p4d;
5042 	vm_fault_t ret;
5043 
5044 	pgd = pgd_offset(mm, address);
5045 	p4d = p4d_alloc(mm, pgd, address);
5046 	if (!p4d)
5047 		return VM_FAULT_OOM;
5048 
5049 	vmf.pud = pud_alloc(mm, p4d, address);
5050 	if (!vmf.pud)
5051 		return VM_FAULT_OOM;
5052 retry_pud:
5053 	if (pud_none(*vmf.pud) &&
5054 	    hugepage_vma_check(vma, vm_flags, false, true, true)) {
5055 		ret = create_huge_pud(&vmf);
5056 		if (!(ret & VM_FAULT_FALLBACK))
5057 			return ret;
5058 	} else {
5059 		pud_t orig_pud = *vmf.pud;
5060 
5061 		barrier();
5062 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5063 
5064 			/*
5065 			 * TODO once we support anonymous PUDs: NUMA case and
5066 			 * FAULT_FLAG_UNSHARE handling.
5067 			 */
5068 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
5069 				ret = wp_huge_pud(&vmf, orig_pud);
5070 				if (!(ret & VM_FAULT_FALLBACK))
5071 					return ret;
5072 			} else {
5073 				huge_pud_set_accessed(&vmf, orig_pud);
5074 				return 0;
5075 			}
5076 		}
5077 	}
5078 
5079 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5080 	if (!vmf.pmd)
5081 		return VM_FAULT_OOM;
5082 
5083 	/* Huge pud page fault raced with pmd_alloc? */
5084 	if (pud_trans_unstable(vmf.pud))
5085 		goto retry_pud;
5086 
5087 	if (pmd_none(*vmf.pmd) &&
5088 	    hugepage_vma_check(vma, vm_flags, false, true, true)) {
5089 		ret = create_huge_pmd(&vmf);
5090 		if (!(ret & VM_FAULT_FALLBACK))
5091 			return ret;
5092 	} else {
5093 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
5094 
5095 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
5096 			VM_BUG_ON(thp_migration_supported() &&
5097 					  !is_pmd_migration_entry(vmf.orig_pmd));
5098 			if (is_pmd_migration_entry(vmf.orig_pmd))
5099 				pmd_migration_entry_wait(mm, vmf.pmd);
5100 			return 0;
5101 		}
5102 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5103 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5104 				return do_huge_pmd_numa_page(&vmf);
5105 
5106 			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5107 			    !pmd_write(vmf.orig_pmd)) {
5108 				ret = wp_huge_pmd(&vmf);
5109 				if (!(ret & VM_FAULT_FALLBACK))
5110 					return ret;
5111 			} else {
5112 				huge_pmd_set_accessed(&vmf);
5113 				return 0;
5114 			}
5115 		}
5116 	}
5117 
5118 	return handle_pte_fault(&vmf);
5119 }
5120 
5121 /**
5122  * mm_account_fault - Do page fault accounting
5123  * @mm: mm from which memcg should be extracted. It can be NULL.
5124  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
5125  *        of perf event counters, but we'll still do the per-task accounting to
5126  *        the task who triggered this page fault.
5127  * @address: the faulted address.
5128  * @flags: the fault flags.
5129  * @ret: the fault retcode.
5130  *
5131  * This will take care of most of the page fault accounting.  Meanwhile, it
5132  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
5133  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
5134  * still be in per-arch page fault handlers at the entry of page fault.
5135  */
5136 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
5137 				    unsigned long address, unsigned int flags,
5138 				    vm_fault_t ret)
5139 {
5140 	bool major;
5141 
5142 	/* Incomplete faults will be accounted upon completion. */
5143 	if (ret & VM_FAULT_RETRY)
5144 		return;
5145 
5146 	/*
5147 	 * To preserve the behavior of older kernels, PGFAULT counters record
5148 	 * both successful and failed faults, as opposed to perf counters,
5149 	 * which ignore failed cases.
5150 	 */
5151 	count_vm_event(PGFAULT);
5152 	count_memcg_event_mm(mm, PGFAULT);
5153 
5154 	/*
5155 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
5156 	 * valid).  That includes arch_vma_access_permitted() failing before
5157 	 * reaching here. So this is not a "this many hardware page faults"
5158 	 * counter.  We should use the hw profiling for that.
5159 	 */
5160 	if (ret & VM_FAULT_ERROR)
5161 		return;
5162 
5163 	/*
5164 	 * We define the fault as a major fault when the final successful fault
5165 	 * is VM_FAULT_MAJOR, or if it was retried (which implies that we
5166 	 * could not handle it immediately on the first attempt).
5167 	 */
5168 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5169 
5170 	if (major)
5171 		current->maj_flt++;
5172 	else
5173 		current->min_flt++;
5174 
5175 	/*
5176 	 * If the fault is done for GUP, regs will be NULL.  We only do the
5177 	 * per-thread fault accounting for the task that triggered the
5178 	 * fault, and we skip the perf event updates.
5179 	 */
5180 	if (!regs)
5181 		return;
5182 
5183 	if (major)
5184 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
5185 	else
5186 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
5187 }
5188 
5189 #ifdef CONFIG_LRU_GEN
5190 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5191 {
5192 	/* the LRU algorithm only applies to accesses with recency */
5193 	current->in_lru_fault = vma_has_recency(vma);
5194 }
5195 
5196 static void lru_gen_exit_fault(void)
5197 {
5198 	current->in_lru_fault = false;
5199 }
5200 #else
5201 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5202 {
5203 }
5204 
5205 static void lru_gen_exit_fault(void)
5206 {
5207 }
5208 #endif /* CONFIG_LRU_GEN */
5209 
5210 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
5211 				       unsigned int *flags)
5212 {
5213 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
5214 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
5215 			return VM_FAULT_SIGSEGV;
5216 		/*
5217 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
5218 		 * just treat it like an ordinary read-fault otherwise.
5219 		 */
5220 		if (!is_cow_mapping(vma->vm_flags))
5221 			*flags &= ~FAULT_FLAG_UNSHARE;
5222 	} else if (*flags & FAULT_FLAG_WRITE) {
5223 		/* Write faults on read-only mappings are impossible ... */
5224 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
5225 			return VM_FAULT_SIGSEGV;
5226 		/* ... and FOLL_FORCE only applies to COW mappings. */
5227 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
5228 				 !is_cow_mapping(vma->vm_flags)))
5229 			return VM_FAULT_SIGSEGV;
5230 	}
5231 #ifdef CONFIG_PER_VMA_LOCK
5232 	/*
5233 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
5234 	 * the assumption that the lock is dropped on VM_FAULT_RETRY.
5235 	 */
5236 	if (WARN_ON_ONCE((*flags &
5237 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
5238 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
5239 		return VM_FAULT_SIGSEGV;
5240 #endif
5241 
5242 	return 0;
5243 }
5244 
5245 /*
5246  * By the time we get here, we already hold the mm semaphore
5247  *
5248  * The mmap_lock may have been released depending on flags and our
5249  * return value.  See filemap_fault() and __folio_lock_or_retry().
5250  */
5251 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5252 			   unsigned int flags, struct pt_regs *regs)
5253 {
5254 	/* If the fault handler drops the mmap_lock, vma may be freed */
5255 	struct mm_struct *mm = vma->vm_mm;
5256 	vm_fault_t ret;
5257 
5258 	__set_current_state(TASK_RUNNING);
5259 
5260 	ret = sanitize_fault_flags(vma, &flags);
5261 	if (ret)
5262 		goto out;
5263 
5264 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5265 					    flags & FAULT_FLAG_INSTRUCTION,
5266 					    flags & FAULT_FLAG_REMOTE)) {
5267 		ret = VM_FAULT_SIGSEGV;
5268 		goto out;
5269 	}
5270 
5271 	/*
5272 	 * Enable the memcg OOM handling for faults triggered in user
5273 	 * space.  Kernel faults are handled more gracefully.
5274 	 */
5275 	if (flags & FAULT_FLAG_USER)
5276 		mem_cgroup_enter_user_fault();
5277 
5278 	lru_gen_enter_fault(vma);
5279 
5280 	if (unlikely(is_vm_hugetlb_page(vma)))
5281 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5282 	else
5283 		ret = __handle_mm_fault(vma, address, flags);
5284 
5285 	lru_gen_exit_fault();
5286 
5287 	if (flags & FAULT_FLAG_USER) {
5288 		mem_cgroup_exit_user_fault();
5289 		/*
5290 		 * The task may have entered a memcg OOM situation but
5291 		 * if the allocation error was handled gracefully (no
5292 		 * VM_FAULT_OOM), there is no need to kill anything.
5293 		 * Just clean up the OOM state peacefully.
5294 		 */
5295 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5296 			mem_cgroup_oom_synchronize(false);
5297 	}
5298 out:
5299 	mm_account_fault(mm, regs, address, flags, ret);
5300 
5301 	return ret;
5302 }
5303 EXPORT_SYMBOL_GPL(handle_mm_fault);
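
/*
 * An illustrative, hypothetical sketch (not a function in this file): the
 * rough shape of an architecture page fault handler calling
 * handle_mm_fault().  The name example_user_fault() and the simplified
 * error handling are made up; real handlers (e.g. arch/x86/mm/fault.c)
 * also deal with retries, fatal signals and error codes.  This only
 * demonstrates the locking contract: mmap_lock is held for read across
 * the call and may already have been dropped when VM_FAULT_RETRY or
 * VM_FAULT_COMPLETED comes back.
 */
static vm_fault_t example_user_fault(struct mm_struct *mm, unsigned long addr,
				     unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr) {
		/* No VMA covering the address (stack expansion omitted). */
		mmap_read_unlock(mm);
		return VM_FAULT_SIGSEGV;
	}

	ret = handle_mm_fault(vma, addr, flags, regs);
	if (!(ret & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
	return ret;
}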
5304 
5305 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
5306 #include <linux/extable.h>
5307 
5308 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5309 {
5310 	if (likely(mmap_read_trylock(mm)))
5311 		return true;
5312 
5313 	if (regs && !user_mode(regs)) {
5314 		unsigned long ip = instruction_pointer(regs);
5315 		if (!search_exception_tables(ip))
5316 			return false;
5317 	}
5318 
5319 	return !mmap_read_lock_killable(mm);
5320 }
5321 
5322 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
5323 {
5324 	/*
5325 	 * We don't have this operation yet.
5326 	 *
5327 	 * It should be easy enough to do: it's basically an
5328 	 *    atomic_long_try_cmpxchg_acquire()
5329 	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
5330 	 * it also needs the proper lockdep magic etc.
5331 	 */
5332 	return false;
5333 }
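
/*
 * A minimal sketch of the missing upgrade described above, assuming we are
 * the only reader.  RWSEM_READER_BIAS and RWSEM_WRITER_LOCKED are private to
 * kernel/locking/rwsem.c, so this could not live here as-is, and it omits
 * the lockdep bookkeeping entirely:
 *
 *	long expected = RWSEM_READER_BIAS;
 *
 *	return atomic_long_try_cmpxchg_acquire(&mm->mmap_lock.count,
 *					       &expected, RWSEM_WRITER_LOCKED);
 */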
5334 
5335 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5336 {
5337 	mmap_read_unlock(mm);
5338 	if (regs && !user_mode(regs)) {
5339 		unsigned long ip = instruction_pointer(regs);
5340 		if (!search_exception_tables(ip))
5341 			return false;
5342 	}
5343 	return !mmap_write_lock_killable(mm);
5344 }
5345 
5346 /*
5347  * Helper for page fault handling.
5348  *
5349  * This is kind of equivalent to "mmap_read_lock()" followed
5350  * by "find_extend_vma()", except it's a lot more careful about
5351  * the locking (and will drop the lock on failure).
5352  *
5353  * For example, if we have a kernel bug that causes a page
5354  * fault, we don't want to just use mmap_read_lock() to get
5355  * the mm lock, because that would deadlock if the bug were
5356  * to happen while we're holding the mm lock for writing.
5357  *
5358  * So this checks the exception tables on kernel faults in
5359  * order to do all this only for instructions that are actually
5360  * expected to fault.
5361  *
5362  * We can also actually take the mm lock for writing if we
5363  * need to extend the vma, which helps the VM layer a lot.
5364  */
5365 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
5366 			unsigned long addr, struct pt_regs *regs)
5367 {
5368 	struct vm_area_struct *vma;
5369 
5370 	if (!get_mmap_lock_carefully(mm, regs))
5371 		return NULL;
5372 
5373 	vma = find_vma(mm, addr);
5374 	if (likely(vma && (vma->vm_start <= addr)))
5375 		return vma;
5376 
5377 	/*
5378 	 * Well, dang. We might still be successful, but only
5379 	 * if we can extend a vma to do so.
5380 	 */
5381 	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
5382 		mmap_read_unlock(mm);
5383 		return NULL;
5384 	}
5385 
5386 	/*
5387 	 * We can try to upgrade the mmap lock atomically,
5388 	 * in which case we can continue to use the vma
5389 	 * we already looked up.
5390 	 *
5391 	 * Otherwise we'll have to drop the mmap lock and
5392 	 * re-take it, and also look up the vma again,
5393 	 * re-checking it.
5394 	 */
5395 	if (!mmap_upgrade_trylock(mm)) {
5396 		if (!upgrade_mmap_lock_carefully(mm, regs))
5397 			return NULL;
5398 
5399 		vma = find_vma(mm, addr);
5400 		if (!vma)
5401 			goto fail;
5402 		if (vma->vm_start <= addr)
5403 			goto success;
5404 		if (!(vma->vm_flags & VM_GROWSDOWN))
5405 			goto fail;
5406 	}
5407 
5408 	if (expand_stack_locked(vma, addr))
5409 		goto fail;
5410 
5411 success:
5412 	mmap_write_downgrade(mm);
5413 	return vma;
5414 
5415 fail:
5416 	mmap_write_unlock(mm);
5417 	return NULL;
5418 }
5419 #endif
5420 
5421 #ifdef CONFIG_PER_VMA_LOCK
5422 /*
5423  * Look up and lock a VMA under RCU protection. The returned VMA is
5424  * guaranteed to be stable and not isolated. If the VMA is not found or is
5425  * being modified, the function returns NULL.
5426  */
5427 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
5428 					  unsigned long address)
5429 {
5430 	MA_STATE(mas, &mm->mm_mt, address, address);
5431 	struct vm_area_struct *vma;
5432 
5433 	rcu_read_lock();
5434 retry:
5435 	vma = mas_walk(&mas);
5436 	if (!vma)
5437 		goto inval;
5438 
5439 	if (!vma_start_read(vma))
5440 		goto inval;
5441 
5442 	/*
5443 	 * find_mergeable_anon_vma uses adjacent vmas which are not locked.
5444 	 * This check must happen after vma_start_read(); otherwise, a
5445 	 * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA
5446 	 * from its anon_vma.
5447 	 */
5448 	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma))
5449 		goto inval_end_read;
5450 
5451 	/* Check since vm_start/vm_end might change before we lock the VMA */
5452 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
5453 		goto inval_end_read;
5454 
5455 	/* Check if the VMA got isolated after we found it */
5456 	if (vma->detached) {
5457 		vma_end_read(vma);
5458 		count_vm_vma_lock_event(VMA_LOCK_MISS);
5459 		/* The area was replaced with another one */
5460 		goto retry;
5461 	}
5462 
5463 	rcu_read_unlock();
5464 	return vma;
5465 
5466 inval_end_read:
5467 	vma_end_read(vma);
5468 inval:
5469 	rcu_read_unlock();
5470 	count_vm_vma_lock_event(VMA_LOCK_ABORT);
5471 	return NULL;
5472 }
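
/*
 * An illustrative, hypothetical sketch loosely modeled on the arch fault
 * handlers: the lockless fast path tries the per-VMA lock first and asks
 * the caller to fall back to the mmap_lock path when it cannot be used.
 * It assumes the behavior of recent kernels where the fault handler itself
 * drops the per-VMA lock before returning VM_FAULT_RETRY or
 * VM_FAULT_COMPLETED, hence the conditional vma_end_read(); verify that
 * against the arch handlers before relying on it.
 */
static vm_fault_t example_vma_locked_fault(struct mm_struct *mm,
					   unsigned long addr,
					   unsigned int flags,
					   struct pt_regs *regs)
{
	struct vm_area_struct *vma = lock_vma_under_rcu(mm, addr);
	vm_fault_t ret;

	if (!vma)
		return VM_FAULT_RETRY;	/* caller falls back to mmap_lock */

	ret = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(ret & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	return ret;
}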
5473 #endif /* CONFIG_PER_VMA_LOCK */
5474 
5475 #ifndef __PAGETABLE_P4D_FOLDED
5476 /*
5477  * Allocate p4d page table.
5478  * We've already handled the fast-path in-line.
5479  */
5480 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5481 {
5482 	p4d_t *new = p4d_alloc_one(mm, address);
5483 	if (!new)
5484 		return -ENOMEM;
5485 
5486 	spin_lock(&mm->page_table_lock);
5487 	if (pgd_present(*pgd)) {	/* Another has populated it */
5488 		p4d_free(mm, new);
5489 	} else {
5490 		smp_wmb(); /* See comment in pmd_install() */
5491 		pgd_populate(mm, pgd, new);
5492 	}
5493 	spin_unlock(&mm->page_table_lock);
5494 	return 0;
5495 }
5496 #endif /* __PAGETABLE_P4D_FOLDED */
5497 
5498 #ifndef __PAGETABLE_PUD_FOLDED
5499 /*
5500  * Allocate page upper directory.
5501  * We've already handled the fast-path in-line.
5502  */
5503 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
5504 {
5505 	pud_t *new = pud_alloc_one(mm, address);
5506 	if (!new)
5507 		return -ENOMEM;
5508 
5509 	spin_lock(&mm->page_table_lock);
5510 	if (!p4d_present(*p4d)) {
5511 		mm_inc_nr_puds(mm);
5512 		smp_wmb(); /* See comment in pmd_install() */
5513 		p4d_populate(mm, p4d, new);
5514 	} else	/* Another has populated it */
5515 		pud_free(mm, new);
5516 	spin_unlock(&mm->page_table_lock);
5517 	return 0;
5518 }
5519 #endif /* __PAGETABLE_PUD_FOLDED */
5520 
5521 #ifndef __PAGETABLE_PMD_FOLDED
5522 /*
5523  * Allocate page middle directory.
5524  * We've already handled the fast-path in-line.
5525  */
5526 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
5527 {
5528 	spinlock_t *ptl;
5529 	pmd_t *new = pmd_alloc_one(mm, address);
5530 	if (!new)
5531 		return -ENOMEM;
5532 
5533 	ptl = pud_lock(mm, pud);
5534 	if (!pud_present(*pud)) {
5535 		mm_inc_nr_pmds(mm);
5536 		smp_wmb(); /* See comment in pmd_install() */
5537 		pud_populate(mm, pud, new);
5538 	} else {	/* Another has populated it */
5539 		pmd_free(mm, new);
5540 	}
5541 	spin_unlock(ptl);
5542 	return 0;
5543 }
5544 #endif /* __PAGETABLE_PMD_FOLDED */
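
/*
 * Note that __p4d_alloc(), __pud_alloc() and __pmd_alloc() above all follow
 * the same race-tolerant pattern: allocate the new table with no lock held,
 * re-check the upper-level entry under the page table lock, and free the
 * allocation again if another thread won the race to populate the entry.
 */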
5545 
5546 /**
5547  * follow_pte - look up PTE at a user virtual address
5548  * @mm: the mm_struct of the target address space
5549  * @address: user virtual address
5550  * @ptepp: location to store found PTE
5551  * @ptlp: location to store the lock for the PTE
5552  *
5553  * On a successful return, the pointer to the PTE is stored in @ptepp;
5554  * the corresponding lock is taken and its location is stored in @ptlp.
5555  * The contents of the PTE are only stable until @ptlp is released;
5556  * any further use, if any, must be protected against invalidation
5557  * with MMU notifiers.
5558  *
5559  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
5560  * should be taken for read.
5561  *
5562  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
5563  * it is not a good general-purpose API.
5564  *
5565  * Return: zero on success, -ve otherwise.
5566  */
5567 int follow_pte(struct mm_struct *mm, unsigned long address,
5568 	       pte_t **ptepp, spinlock_t **ptlp)
5569 {
5570 	pgd_t *pgd;
5571 	p4d_t *p4d;
5572 	pud_t *pud;
5573 	pmd_t *pmd;
5574 	pte_t *ptep;
5575 
5576 	pgd = pgd_offset(mm, address);
5577 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5578 		goto out;
5579 
5580 	p4d = p4d_offset(pgd, address);
5581 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5582 		goto out;
5583 
5584 	pud = pud_offset(p4d, address);
5585 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5586 		goto out;
5587 
5588 	pmd = pmd_offset(pud, address);
5589 	VM_BUG_ON(pmd_trans_huge(*pmd));
5590 
5591 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
5592 	if (!ptep)
5593 		goto out;
5594 	if (!pte_present(ptep_get(ptep)))
5595 		goto unlock;
5596 	*ptepp = ptep;
5597 	return 0;
5598 unlock:
5599 	pte_unmap_unlock(ptep, *ptlp);
5600 out:
5601 	return -EINVAL;
5602 }
5603 EXPORT_SYMBOL_GPL(follow_pte);
5604 
5605 /**
5606  * follow_pfn - look up PFN at a user virtual address
5607  * @vma: memory mapping
5608  * @address: user virtual address
5609  * @pfn: location to store found PFN
5610  *
5611  * Only IO mappings and raw PFN mappings are allowed.
5612  *
5613  * This function does not allow the caller to read the permissions
5614  * of the PTE.  Do not use it.
5615  *
5616  * Return: zero and the pfn at @pfn on success, -ve otherwise.
5617  */
5618 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5619 	unsigned long *pfn)
5620 {
5621 	int ret = -EINVAL;
5622 	spinlock_t *ptl;
5623 	pte_t *ptep;
5624 
5625 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5626 		return ret;
5627 
5628 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5629 	if (ret)
5630 		return ret;
5631 	*pfn = pte_pfn(ptep_get(ptep));
5632 	pte_unmap_unlock(ptep, ptl);
5633 	return 0;
5634 }
5635 EXPORT_SYMBOL(follow_pfn);
5636 
5637 #ifdef CONFIG_HAVE_IOREMAP_PROT
5638 int follow_phys(struct vm_area_struct *vma,
5639 		unsigned long address, unsigned int flags,
5640 		unsigned long *prot, resource_size_t *phys)
5641 {
5642 	int ret = -EINVAL;
5643 	pte_t *ptep, pte;
5644 	spinlock_t *ptl;
5645 
5646 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5647 		goto out;
5648 
5649 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5650 		goto out;
5651 	pte = ptep_get(ptep);
5652 
5653 	if ((flags & FOLL_WRITE) && !pte_write(pte))
5654 		goto unlock;
5655 
5656 	*prot = pgprot_val(pte_pgprot(pte));
5657 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5658 
5659 	ret = 0;
5660 unlock:
5661 	pte_unmap_unlock(ptep, ptl);
5662 out:
5663 	return ret;
5664 }
5665 
5666 /**
5667  * generic_access_phys - generic implementation for iomem mmap access
5668  * @vma: the vma to access
5669  * @addr: userspace address, not relative offset within @vma
5670  * @buf: buffer to read/write
5671  * @len: length of transfer
5672  * @write: set to FOLL_WRITE when writing, otherwise reading
5673  *
5674  * This is a generic implementation for &vm_operations_struct.access for an
5675  * iomem mapping. This callback is used by access_process_vm() when the @vma is
5676  * not page based.
5677  */
5678 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5679 			void *buf, int len, int write)
5680 {
5681 	resource_size_t phys_addr;
5682 	unsigned long prot = 0;
5683 	void __iomem *maddr;
5684 	pte_t *ptep, pte;
5685 	spinlock_t *ptl;
5686 	int offset = offset_in_page(addr);
5687 	int ret = -EINVAL;
5688 
5689 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5690 		return -EINVAL;
5691 
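	/*
	 * ioremap_prot() may sleep, so the PTE is sampled and the page table
	 * lock dropped before mapping.  The sample is re-validated with
	 * pte_same() once the mapping exists, and we retry from here if the
	 * PTE changed in the meantime.
	 */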
5692 retry:
5693 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5694 		return -EINVAL;
5695 	pte = ptep_get(ptep);
5696 	pte_unmap_unlock(ptep, ptl);
5697 
5698 	prot = pgprot_val(pte_pgprot(pte));
5699 	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5700 
5701 	if ((write & FOLL_WRITE) && !pte_write(pte))
5702 		return -EINVAL;
5703 
5704 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5705 	if (!maddr)
5706 		return -ENOMEM;
5707 
5708 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5709 		goto out_unmap;
5710 
5711 	if (!pte_same(pte, ptep_get(ptep))) {
5712 		pte_unmap_unlock(ptep, ptl);
5713 		iounmap(maddr);
5714 
5715 		goto retry;
5716 	}
5717 
5718 	if (write)
5719 		memcpy_toio(maddr + offset, buf, len);
5720 	else
5721 		memcpy_fromio(buf, maddr + offset, len);
5722 	ret = len;
5723 	pte_unmap_unlock(ptep, ptl);
5724 out_unmap:
5725 	iounmap(maddr);
5726 
5727 	return ret;
5728 }
5729 EXPORT_SYMBOL_GPL(generic_access_phys);
5730 #endif
5731 
5732 /*
5733  * Access another process' address space as given in mm.
5734  */
5735 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5736 		       int len, unsigned int gup_flags)
5737 {
5738 	void *old_buf = buf;
5739 	int write = gup_flags & FOLL_WRITE;
5740 
5741 	if (mmap_read_lock_killable(mm))
5742 		return 0;
5743 
5744 	/* Untag the address before looking up the VMA */
5745 	addr = untagged_addr_remote(mm, addr);
5746 
5747 	/* Avoid triggering the temporary warning in __get_user_pages */
5748 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
5749 		return 0;
5750 
5751 	/* ignore errors, just check how much was successfully transferred */
5752 	while (len) {
5753 		int bytes, offset;
5754 		void *maddr;
5755 		struct vm_area_struct *vma = NULL;
5756 		struct page *page = get_user_page_vma_remote(mm, addr,
5757 							     gup_flags, &vma);
5758 
5759 		if (IS_ERR_OR_NULL(page)) {
5760 			/* We might need to expand the stack to access it */
5761 			vma = vma_lookup(mm, addr);
5762 			if (!vma) {
5763 				vma = expand_stack(mm, addr);
5764 
5765 				/* mmap_lock was dropped on failure */
5766 				if (!vma)
5767 					return buf - old_buf;
5768 
5769 				/* Try again if stack expansion worked */
5770 				continue;
5771 			}
5772 
5773 
5774 			/*
5775 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5776 			 * we can access using slightly different code.
5777 			 */
5778 			bytes = 0;
5779 #ifdef CONFIG_HAVE_IOREMAP_PROT
5780 			if (vma->vm_ops && vma->vm_ops->access)
5781 				bytes = vma->vm_ops->access(vma, addr, buf,
5782 							    len, write);
5783 #endif
5784 			if (bytes <= 0)
5785 				break;
5786 		} else {
5787 			bytes = len;
5788 			offset = addr & (PAGE_SIZE-1);
5789 			if (bytes > PAGE_SIZE-offset)
5790 				bytes = PAGE_SIZE-offset;
5791 
5792 			maddr = kmap(page);
5793 			if (write) {
5794 				copy_to_user_page(vma, page, addr,
5795 						  maddr + offset, buf, bytes);
5796 				set_page_dirty_lock(page);
5797 			} else {
5798 				copy_from_user_page(vma, page, addr,
5799 						    buf, maddr + offset, bytes);
5800 			}
5801 			kunmap(page);
5802 			put_page(page);
5803 		}
5804 		len -= bytes;
5805 		buf += bytes;
5806 		addr += bytes;
5807 	}
5808 	mmap_read_unlock(mm);
5809 
5810 	return buf - old_buf;
5811 }
5812 
5813 /**
5814  * access_remote_vm - access another process' address space
5815  * @mm:		the mm_struct of the target address space
5816  * @addr:	start address to access
5817  * @buf:	source or destination buffer
5818  * @len:	number of bytes to transfer
5819  * @gup_flags:	flags modifying lookup behaviour
5820  *
5821  * The caller must hold a reference on @mm.
5822  *
5823  * Return: number of bytes copied from source to destination.
5824  */
5825 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5826 		void *buf, int len, unsigned int gup_flags)
5827 {
5828 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
5829 }
5830 
5831 /*
5832  * Access another process' address space.
5833  * Source/target buffer must be in kernel space.
5834  * Do not walk the page tables directly; use get_user_pages().
5835  */
5836 int access_process_vm(struct task_struct *tsk, unsigned long addr,
5837 		void *buf, int len, unsigned int gup_flags)
5838 {
5839 	struct mm_struct *mm;
5840 	int ret;
5841 
5842 	mm = get_task_mm(tsk);
5843 	if (!mm)
5844 		return 0;
5845 
5846 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5847 
5848 	mmput(mm);
5849 
5850 	return ret;
5851 }
5852 EXPORT_SYMBOL_GPL(access_process_vm);
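
/*
 * An illustrative, hypothetical helper: reading one word from another
 * task's address space, roughly what ptrace(PTRACE_PEEKDATA) boils down
 * to.  FOLL_FORCE mirrors ptrace's ability to access mappings regardless
 * of their current protections.
 */
static int example_peek_word(struct task_struct *tsk, unsigned long addr,
			     unsigned long *val)
{
	int copied = access_process_vm(tsk, addr, val, sizeof(*val),
				       FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}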
5853 
5854 /*
5855  * Print the file name and extent of the VMA containing a given address.
5856  */
5857 void print_vma_addr(char *prefix, unsigned long ip)
5858 {
5859 	struct mm_struct *mm = current->mm;
5860 	struct vm_area_struct *vma;
5861 
5862 	/*
5863 	 * We might be running from an atomic context, so we cannot sleep.
5864 	 */
5865 	if (!mmap_read_trylock(mm))
5866 		return;
5867 
5868 	vma = find_vma(mm, ip);
5869 	if (vma && vma->vm_file) {
5870 		struct file *f = vma->vm_file;
5871 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5872 		if (buf) {
5873 			char *p;
5874 
5875 			p = file_path(f, buf, PAGE_SIZE);
5876 			if (IS_ERR(p))
5877 				p = "?";
5878 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5879 					vma->vm_start,
5880 					vma->vm_end - vma->vm_start);
5881 			free_page((unsigned long)buf);
5882 		}
5883 	}
5884 	mmap_read_unlock(mm);
5885 }
5886 
5887 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5888 void __might_fault(const char *file, int line)
5889 {
5890 	if (pagefault_disabled())
5891 		return;
5892 	__might_sleep(file, line);
5893 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5894 	if (current->mm)
5895 		might_lock_read(&current->mm->mmap_lock);
5896 #endif
5897 }
5898 EXPORT_SYMBOL(__might_fault);
5899 #endif
5900 
5901 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5902 /*
5903  * Process all subpages of the specified huge page with the specified
5904  * operation.  The target subpage will be processed last to keep its
5905  * cache lines hot.
5906  */
5907 static inline int process_huge_page(
5908 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5909 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
5910 	void *arg)
5911 {
5912 	int i, n, base, l, ret;
5913 	unsigned long addr = addr_hint &
5914 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5915 
5916 	/* Process target subpage last to keep its cache lines hot */
5917 	might_sleep();
5918 	n = (addr_hint - addr) / PAGE_SIZE;
5919 	if (2 * n <= pages_per_huge_page) {
5920 		/* If target subpage in first half of huge page */
5921 		base = 0;
5922 		l = n;
5923 		/* Process subpages at the end of huge page */
5924 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5925 			cond_resched();
5926 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
5927 			if (ret)
5928 				return ret;
5929 		}
5930 	} else {
5931 		/* If target subpage in second half of huge page */
5932 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5933 		l = pages_per_huge_page - n;
5934 		/* Process subpages at the beginning of the huge page */
5935 		for (i = 0; i < base; i++) {
5936 			cond_resched();
5937 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
5938 			if (ret)
5939 				return ret;
5940 		}
5941 	}
5942 	/*
5943 	 * Process remaining subpages in left-right-left-right pattern
5944 	 * towards the target subpage
5945 	 */
5946 	for (i = 0; i < l; i++) {
5947 		int left_idx = base + i;
5948 		int right_idx = base + 2 * l - 1 - i;
5949 
5950 		cond_resched();
5951 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5952 		if (ret)
5953 			return ret;
5954 		cond_resched();
5955 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5956 		if (ret)
5957 			return ret;
5958 	}
5959 	return 0;
5960 }
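
/*
 * Worked example for process_huge_page(): with pages_per_huge_page == 8 and
 * the target at index n == 2 (first half, so base == 0 and l == 2), the
 * subpages are visited in the order 7, 6, 5, 4, then 0, 3, 1, 2: the far end
 * is swept first and the walk then converges on the target, so the target's
 * cache lines are the hottest when the caller returns.
 */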
5961 
5962 static void clear_gigantic_page(struct page *page,
5963 				unsigned long addr,
5964 				unsigned int pages_per_huge_page)
5965 {
5966 	int i;
5967 	struct page *p;
5968 
5969 	might_sleep();
5970 	for (i = 0; i < pages_per_huge_page; i++) {
5971 		p = nth_page(page, i);
5972 		cond_resched();
5973 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5974 	}
5975 }
5976 
5977 static int clear_subpage(unsigned long addr, int idx, void *arg)
5978 {
5979 	struct page *page = arg;
5980 
5981 	clear_user_highpage(page + idx, addr);
5982 	return 0;
5983 }
5984 
5985 void clear_huge_page(struct page *page,
5986 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5987 {
5988 	unsigned long addr = addr_hint &
5989 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5990 
5991 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5992 		clear_gigantic_page(page, addr, pages_per_huge_page);
5993 		return;
5994 	}
5995 
5996 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5997 }
5998 
5999 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
6000 				     unsigned long addr,
6001 				     struct vm_area_struct *vma,
6002 				     unsigned int pages_per_huge_page)
6003 {
6004 	int i;
6005 	struct page *dst_page;
6006 	struct page *src_page;
6007 
6008 	for (i = 0; i < pages_per_huge_page; i++) {
6009 		dst_page = folio_page(dst, i);
6010 		src_page = folio_page(src, i);
6011 
6012 		cond_resched();
6013 		if (copy_mc_user_highpage(dst_page, src_page,
6014 					  addr + i*PAGE_SIZE, vma)) {
6015 			memory_failure_queue(page_to_pfn(src_page), 0);
6016 			return -EHWPOISON;
6017 		}
6018 	}
6019 	return 0;
6020 }
6021 
6022 struct copy_subpage_arg {
6023 	struct page *dst;
6024 	struct page *src;
6025 	struct vm_area_struct *vma;
6026 };
6027 
6028 static int copy_subpage(unsigned long addr, int idx, void *arg)
6029 {
6030 	struct copy_subpage_arg *copy_arg = arg;
6031 
6032 	if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
6033 				  addr, copy_arg->vma)) {
6034 		memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
6035 		return -EHWPOISON;
6036 	}
6037 	return 0;
6038 }
6039 
6040 int copy_user_large_folio(struct folio *dst, struct folio *src,
6041 			  unsigned long addr_hint, struct vm_area_struct *vma)
6042 {
6043 	unsigned int pages_per_huge_page = folio_nr_pages(dst);
6044 	unsigned long addr = addr_hint &
6045 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
6046 	struct copy_subpage_arg arg = {
6047 		.dst = &dst->page,
6048 		.src = &src->page,
6049 		.vma = vma,
6050 	};
6051 
6052 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
6053 		return copy_user_gigantic_page(dst, src, addr, vma,
6054 					       pages_per_huge_page);
6055 
6056 	return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
6057 }
6058 
6059 long copy_folio_from_user(struct folio *dst_folio,
6060 			   const void __user *usr_src,
6061 			   bool allow_pagefault)
6062 {
6063 	void *kaddr;
6064 	unsigned long i, rc = 0;
6065 	unsigned int nr_pages = folio_nr_pages(dst_folio);
6066 	unsigned long ret_val = nr_pages * PAGE_SIZE;
6067 	struct page *subpage;
6068 
6069 	for (i = 0; i < nr_pages; i++) {
6070 		subpage = folio_page(dst_folio, i);
6071 		kaddr = kmap_local_page(subpage);
6072 		if (!allow_pagefault)
6073 			pagefault_disable();
6074 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
6075 		if (!allow_pagefault)
6076 			pagefault_enable();
6077 		kunmap_local(kaddr);
6078 
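		/*
		 * Like copy_from_user(), the final return value is the number
		 * of bytes that could NOT be copied: each fully copied page
		 * subtracts PAGE_SIZE here, and a partial copy breaks out of
		 * the loop with the shortfall still counted in ret_val.
		 */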
6079 		ret_val -= (PAGE_SIZE - rc);
6080 		if (rc)
6081 			break;
6082 
6083 		flush_dcache_page(subpage);
6084 
6085 		cond_resched();
6086 	}
6087 	return ret_val;
6088 }
6089 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
6090 
6091 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
6092 
6093 static struct kmem_cache *page_ptl_cachep;
6094 
6095 void __init ptlock_cache_init(void)
6096 {
6097 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
6098 			SLAB_PANIC, NULL);
6099 }
6100 
6101 bool ptlock_alloc(struct ptdesc *ptdesc)
6102 {
6103 	spinlock_t *ptl;
6104 
6105 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
6106 	if (!ptl)
6107 		return false;
6108 	ptdesc->ptl = ptl;
6109 	return true;
6110 }
6111 
6112 void ptlock_free(struct ptdesc *ptdesc)
6113 {
6114 	kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
6115 }
6116 #endif
6117