xref: /openbmc/linux/mm/memory.c (revision f71a261a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/coredump.h>
47 #include <linux/sched/numa_balancing.h>
48 #include <linux/sched/task.h>
49 #include <linux/hugetlb.h>
50 #include <linux/mman.h>
51 #include <linux/swap.h>
52 #include <linux/highmem.h>
53 #include <linux/pagemap.h>
54 #include <linux/memremap.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/pfn_t.h>
61 #include <linux/writeback.h>
62 #include <linux/memcontrol.h>
63 #include <linux/mmu_notifier.h>
64 #include <linux/swapops.h>
65 #include <linux/elf.h>
66 #include <linux/gfp.h>
67 #include <linux/migrate.h>
68 #include <linux/string.h>
69 #include <linux/debugfs.h>
70 #include <linux/userfaultfd_k.h>
71 #include <linux/dax.h>
72 #include <linux/oom.h>
73 #include <linux/numa.h>
74 #include <linux/perf_event.h>
75 #include <linux/ptrace.h>
76 #include <linux/vmalloc.h>
77 
78 #include <trace/events/kmem.h>
79 
80 #include <asm/io.h>
81 #include <asm/mmu_context.h>
82 #include <asm/pgalloc.h>
83 #include <linux/uaccess.h>
84 #include <asm/tlb.h>
85 #include <asm/tlbflush.h>
86 
87 #include "pgalloc-track.h"
88 #include "internal.h"
89 #include "swap.h"
90 
91 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
92 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
93 #endif
94 
95 #ifndef CONFIG_NUMA
96 unsigned long max_mapnr;
97 EXPORT_SYMBOL(max_mapnr);
98 
99 struct page *mem_map;
100 EXPORT_SYMBOL(mem_map);
101 #endif
102 
103 static vm_fault_t do_fault(struct vm_fault *vmf);
104 
105 /*
106  * A number of key systems in x86 including ioremap() rely on the assumption
107  * that high_memory defines the upper bound on direct map memory, then end
108  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
109  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
110  * and ZONE_HIGHMEM.
111  */
112 void *high_memory;
113 EXPORT_SYMBOL(high_memory);
114 
115 /*
116  * Randomize the address space (stacks, mmaps, brk, etc.).
117  *
118  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
119  *   as ancient (libc5 based) binaries can segfault. )
120  */
121 int randomize_va_space __read_mostly =
122 #ifdef CONFIG_COMPAT_BRK
123 					1;
124 #else
125 					2;
126 #endif
127 
128 #ifndef arch_faults_on_old_pte
129 static inline bool arch_faults_on_old_pte(void)
130 {
131 	/*
132 	 * Those arches which don't have hw access flag feature need to
133 	 * implement their own helper. By default, "true" means pagefault
134 	 * will be hit on old pte.
135 	 */
136 	return true;
137 }
138 #endif
139 
140 #ifndef arch_wants_old_prefaulted_pte
141 static inline bool arch_wants_old_prefaulted_pte(void)
142 {
143 	/*
144 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
145 	 * some architectures, even if it's performed in hardware. By
146 	 * default, "false" means prefaulted entries will be 'young'.
147 	 */
148 	return false;
149 }
150 #endif
151 
152 static int __init disable_randmaps(char *s)
153 {
154 	randomize_va_space = 0;
155 	return 1;
156 }
157 __setup("norandmaps", disable_randmaps);
158 
159 unsigned long zero_pfn __read_mostly;
160 EXPORT_SYMBOL(zero_pfn);
161 
162 unsigned long highest_memmap_pfn __read_mostly;
163 
164 /*
165  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
166  */
167 static int __init init_zero_pfn(void)
168 {
169 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
170 	return 0;
171 }
172 early_initcall(init_zero_pfn);
173 
174 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
175 {
176 	trace_rss_stat(mm, member, count);
177 }
178 
179 #if defined(SPLIT_RSS_COUNTING)
180 
181 void sync_mm_rss(struct mm_struct *mm)
182 {
183 	int i;
184 
185 	for (i = 0; i < NR_MM_COUNTERS; i++) {
186 		if (current->rss_stat.count[i]) {
187 			add_mm_counter(mm, i, current->rss_stat.count[i]);
188 			current->rss_stat.count[i] = 0;
189 		}
190 	}
191 	current->rss_stat.events = 0;
192 }
193 
194 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
195 {
196 	struct task_struct *task = current;
197 
198 	if (likely(task->mm == mm))
199 		task->rss_stat.count[member] += val;
200 	else
201 		add_mm_counter(mm, member, val);
202 }
203 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
204 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
205 
206 /* sync counter once per 64 page faults */
207 #define TASK_RSS_EVENTS_THRESH	(64)
208 static void check_sync_rss_stat(struct task_struct *task)
209 {
210 	if (unlikely(task != current))
211 		return;
212 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
213 		sync_mm_rss(task->mm);
214 }
215 #else /* SPLIT_RSS_COUNTING */
216 
217 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
218 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
219 
220 static void check_sync_rss_stat(struct task_struct *task)
221 {
222 }
223 
224 #endif /* SPLIT_RSS_COUNTING */
225 
226 /*
227  * Note: this doesn't free the actual pages themselves. That
228  * has been handled earlier when unmapping all the memory regions.
229  */
230 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
231 			   unsigned long addr)
232 {
233 	pgtable_t token = pmd_pgtable(*pmd);
234 	pmd_clear(pmd);
235 	pte_free_tlb(tlb, token, addr);
236 	mm_dec_nr_ptes(tlb->mm);
237 }
238 
239 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
240 				unsigned long addr, unsigned long end,
241 				unsigned long floor, unsigned long ceiling)
242 {
243 	pmd_t *pmd;
244 	unsigned long next;
245 	unsigned long start;
246 
247 	start = addr;
248 	pmd = pmd_offset(pud, addr);
249 	do {
250 		next = pmd_addr_end(addr, end);
251 		if (pmd_none_or_clear_bad(pmd))
252 			continue;
253 		free_pte_range(tlb, pmd, addr);
254 	} while (pmd++, addr = next, addr != end);
255 
256 	start &= PUD_MASK;
257 	if (start < floor)
258 		return;
259 	if (ceiling) {
260 		ceiling &= PUD_MASK;
261 		if (!ceiling)
262 			return;
263 	}
264 	if (end - 1 > ceiling - 1)
265 		return;
266 
267 	pmd = pmd_offset(pud, start);
268 	pud_clear(pud);
269 	pmd_free_tlb(tlb, pmd, start);
270 	mm_dec_nr_pmds(tlb->mm);
271 }
272 
273 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
274 				unsigned long addr, unsigned long end,
275 				unsigned long floor, unsigned long ceiling)
276 {
277 	pud_t *pud;
278 	unsigned long next;
279 	unsigned long start;
280 
281 	start = addr;
282 	pud = pud_offset(p4d, addr);
283 	do {
284 		next = pud_addr_end(addr, end);
285 		if (pud_none_or_clear_bad(pud))
286 			continue;
287 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
288 	} while (pud++, addr = next, addr != end);
289 
290 	start &= P4D_MASK;
291 	if (start < floor)
292 		return;
293 	if (ceiling) {
294 		ceiling &= P4D_MASK;
295 		if (!ceiling)
296 			return;
297 	}
298 	if (end - 1 > ceiling - 1)
299 		return;
300 
301 	pud = pud_offset(p4d, start);
302 	p4d_clear(p4d);
303 	pud_free_tlb(tlb, pud, start);
304 	mm_dec_nr_puds(tlb->mm);
305 }
306 
307 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
308 				unsigned long addr, unsigned long end,
309 				unsigned long floor, unsigned long ceiling)
310 {
311 	p4d_t *p4d;
312 	unsigned long next;
313 	unsigned long start;
314 
315 	start = addr;
316 	p4d = p4d_offset(pgd, addr);
317 	do {
318 		next = p4d_addr_end(addr, end);
319 		if (p4d_none_or_clear_bad(p4d))
320 			continue;
321 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
322 	} while (p4d++, addr = next, addr != end);
323 
324 	start &= PGDIR_MASK;
325 	if (start < floor)
326 		return;
327 	if (ceiling) {
328 		ceiling &= PGDIR_MASK;
329 		if (!ceiling)
330 			return;
331 	}
332 	if (end - 1 > ceiling - 1)
333 		return;
334 
335 	p4d = p4d_offset(pgd, start);
336 	pgd_clear(pgd);
337 	p4d_free_tlb(tlb, p4d, start);
338 }
339 
340 /*
341  * This function frees user-level page tables of a process.
342  */
343 void free_pgd_range(struct mmu_gather *tlb,
344 			unsigned long addr, unsigned long end,
345 			unsigned long floor, unsigned long ceiling)
346 {
347 	pgd_t *pgd;
348 	unsigned long next;
349 
350 	/*
351 	 * The next few lines have given us lots of grief...
352 	 *
353 	 * Why are we testing PMD* at this top level?  Because often
354 	 * there will be no work to do at all, and we'd prefer not to
355 	 * go all the way down to the bottom just to discover that.
356 	 *
357 	 * Why all these "- 1"s?  Because 0 represents both the bottom
358 	 * of the address space and the top of it (using -1 for the
359 	 * top wouldn't help much: the masks would do the wrong thing).
360 	 * The rule is that addr 0 and floor 0 refer to the bottom of
361 	 * the address space, but end 0 and ceiling 0 refer to the top
362 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
363 	 * that end 0 case should be mythical).
364 	 *
365 	 * Wherever addr is brought up or ceiling brought down, we must
366 	 * be careful to reject "the opposite 0" before it confuses the
367 	 * subsequent tests.  But what about where end is brought down
368 	 * by PMD_SIZE below? no, end can't go down to 0 there.
369 	 *
370 	 * Whereas we round start (addr) and ceiling down, by different
371 	 * masks at different levels, in order to test whether a table
372 	 * now has no other vmas using it, so can be freed, we don't
373 	 * bother to round floor or end up - the tests don't need that.
374 	 */
375 
376 	addr &= PMD_MASK;
377 	if (addr < floor) {
378 		addr += PMD_SIZE;
379 		if (!addr)
380 			return;
381 	}
382 	if (ceiling) {
383 		ceiling &= PMD_MASK;
384 		if (!ceiling)
385 			return;
386 	}
387 	if (end - 1 > ceiling - 1)
388 		end -= PMD_SIZE;
389 	if (addr > end - 1)
390 		return;
391 	/*
392 	 * We add page table cache pages with PAGE_SIZE,
393 	 * (see pte_free_tlb()), flush the tlb if we need
394 	 */
395 	tlb_change_page_size(tlb, PAGE_SIZE);
396 	pgd = pgd_offset(tlb->mm, addr);
397 	do {
398 		next = pgd_addr_end(addr, end);
399 		if (pgd_none_or_clear_bad(pgd))
400 			continue;
401 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
402 	} while (pgd++, addr = next, addr != end);
403 }
404 
405 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
406 		unsigned long floor, unsigned long ceiling)
407 {
408 	while (vma) {
409 		struct vm_area_struct *next = vma->vm_next;
410 		unsigned long addr = vma->vm_start;
411 
412 		/*
413 		 * Hide vma from rmap and truncate_pagecache before freeing
414 		 * pgtables
415 		 */
416 		unlink_anon_vmas(vma);
417 		unlink_file_vma(vma);
418 
419 		if (is_vm_hugetlb_page(vma)) {
420 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
421 				floor, next ? next->vm_start : ceiling);
422 		} else {
423 			/*
424 			 * Optimization: gather nearby vmas into one call down
425 			 */
426 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
427 			       && !is_vm_hugetlb_page(next)) {
428 				vma = next;
429 				next = vma->vm_next;
430 				unlink_anon_vmas(vma);
431 				unlink_file_vma(vma);
432 			}
433 			free_pgd_range(tlb, addr, vma->vm_end,
434 				floor, next ? next->vm_start : ceiling);
435 		}
436 		vma = next;
437 	}
438 }
439 
440 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
441 {
442 	spinlock_t *ptl = pmd_lock(mm, pmd);
443 
444 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
445 		mm_inc_nr_ptes(mm);
446 		/*
447 		 * Ensure all pte setup (eg. pte page lock and page clearing) are
448 		 * visible before the pte is made visible to other CPUs by being
449 		 * put into page tables.
450 		 *
451 		 * The other side of the story is the pointer chasing in the page
452 		 * table walking code (when walking the page table without locking;
453 		 * ie. most of the time). Fortunately, these data accesses consist
454 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
455 		 * being the notable exception) will already guarantee loads are
456 		 * seen in-order. See the alpha page table accessors for the
457 		 * smp_rmb() barriers in page table walking code.
458 		 */
459 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
460 		pmd_populate(mm, pmd, *pte);
461 		*pte = NULL;
462 	}
463 	spin_unlock(ptl);
464 }
465 
466 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
467 {
468 	pgtable_t new = pte_alloc_one(mm);
469 	if (!new)
470 		return -ENOMEM;
471 
472 	pmd_install(mm, pmd, &new);
473 	if (new)
474 		pte_free(mm, new);
475 	return 0;
476 }
477 
478 int __pte_alloc_kernel(pmd_t *pmd)
479 {
480 	pte_t *new = pte_alloc_one_kernel(&init_mm);
481 	if (!new)
482 		return -ENOMEM;
483 
484 	spin_lock(&init_mm.page_table_lock);
485 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
486 		smp_wmb(); /* See comment in pmd_install() */
487 		pmd_populate_kernel(&init_mm, pmd, new);
488 		new = NULL;
489 	}
490 	spin_unlock(&init_mm.page_table_lock);
491 	if (new)
492 		pte_free_kernel(&init_mm, new);
493 	return 0;
494 }
495 
496 static inline void init_rss_vec(int *rss)
497 {
498 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
499 }
500 
501 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
502 {
503 	int i;
504 
505 	if (current->mm == mm)
506 		sync_mm_rss(mm);
507 	for (i = 0; i < NR_MM_COUNTERS; i++)
508 		if (rss[i])
509 			add_mm_counter(mm, i, rss[i]);
510 }
511 
512 /*
513  * This function is called to print an error when a bad pte
514  * is found. For example, we might have a PFN-mapped pte in
515  * a region that doesn't allow it.
516  *
517  * The calling function must still handle the error.
518  */
519 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
520 			  pte_t pte, struct page *page)
521 {
522 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
523 	p4d_t *p4d = p4d_offset(pgd, addr);
524 	pud_t *pud = pud_offset(p4d, addr);
525 	pmd_t *pmd = pmd_offset(pud, addr);
526 	struct address_space *mapping;
527 	pgoff_t index;
528 	static unsigned long resume;
529 	static unsigned long nr_shown;
530 	static unsigned long nr_unshown;
531 
532 	/*
533 	 * Allow a burst of 60 reports, then keep quiet for that minute;
534 	 * or allow a steady drip of one report per second.
535 	 */
536 	if (nr_shown == 60) {
537 		if (time_before(jiffies, resume)) {
538 			nr_unshown++;
539 			return;
540 		}
541 		if (nr_unshown) {
542 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
543 				 nr_unshown);
544 			nr_unshown = 0;
545 		}
546 		nr_shown = 0;
547 	}
548 	if (nr_shown++ == 0)
549 		resume = jiffies + 60 * HZ;
550 
551 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
552 	index = linear_page_index(vma, addr);
553 
554 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
555 		 current->comm,
556 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
557 	if (page)
558 		dump_page(page, "bad pte");
559 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
560 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
561 	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
562 		 vma->vm_file,
563 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
564 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
565 		 mapping ? mapping->a_ops->read_folio : NULL);
566 	dump_stack();
567 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
568 }
569 
570 /*
571  * vm_normal_page -- This function gets the "struct page" associated with a pte.
572  *
573  * "Special" mappings do not wish to be associated with a "struct page" (either
574  * it doesn't exist, or it exists but they don't want to touch it). In this
575  * case, NULL is returned here. "Normal" mappings do have a struct page.
576  *
577  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
578  * pte bit, in which case this function is trivial. Secondly, an architecture
579  * may not have a spare pte bit, which requires a more complicated scheme,
580  * described below.
581  *
582  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
583  * special mapping (even if there are underlying and valid "struct pages").
584  * COWed pages of a VM_PFNMAP are always normal.
585  *
586  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
587  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
588  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
589  * mapping will always honor the rule
590  *
591  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
592  *
593  * And for normal mappings this is false.
594  *
595  * This restricts such mappings to be a linear translation from virtual address
596  * to pfn. To get around this restriction, we allow arbitrary mappings so long
597  * as the vma is not a COW mapping; in that case, we know that all ptes are
598  * special (because none can have been COWed).
599  *
600  *
601  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
602  *
603  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
604  * page" backing, however the difference is that _all_ pages with a struct
605  * page (that is, those where pfn_valid is true) are refcounted and considered
606  * normal pages by the VM. The disadvantage is that pages are refcounted
607  * (which can be slower and simply not an option for some PFNMAP users). The
608  * advantage is that we don't have to follow the strict linearity rule of
609  * PFNMAP mappings in order to support COWable mappings.
610  *
611  */
612 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
613 			    pte_t pte)
614 {
615 	unsigned long pfn = pte_pfn(pte);
616 
617 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
618 		if (likely(!pte_special(pte)))
619 			goto check_pfn;
620 		if (vma->vm_ops && vma->vm_ops->find_special_page)
621 			return vma->vm_ops->find_special_page(vma, addr);
622 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
623 			return NULL;
624 		if (is_zero_pfn(pfn))
625 			return NULL;
626 		if (pte_devmap(pte))
627 			return NULL;
628 
629 		print_bad_pte(vma, addr, pte, NULL);
630 		return NULL;
631 	}
632 
633 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
634 
635 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
636 		if (vma->vm_flags & VM_MIXEDMAP) {
637 			if (!pfn_valid(pfn))
638 				return NULL;
639 			goto out;
640 		} else {
641 			unsigned long off;
642 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
643 			if (pfn == vma->vm_pgoff + off)
644 				return NULL;
645 			if (!is_cow_mapping(vma->vm_flags))
646 				return NULL;
647 		}
648 	}
649 
650 	if (is_zero_pfn(pfn))
651 		return NULL;
652 
653 check_pfn:
654 	if (unlikely(pfn > highest_memmap_pfn)) {
655 		print_bad_pte(vma, addr, pte, NULL);
656 		return NULL;
657 	}
658 
659 	/*
660 	 * NOTE! We still have PageReserved() pages in the page tables.
661 	 * eg. VDSO mappings can cause them to exist.
662 	 */
663 out:
664 	return pfn_to_page(pfn);
665 }
666 
667 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
668 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
669 				pmd_t pmd)
670 {
671 	unsigned long pfn = pmd_pfn(pmd);
672 
673 	/*
674 	 * There is no pmd_special() but there may be special pmds, e.g.
675 	 * in a direct-access (dax) mapping, so let's just replicate the
676 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
677 	 */
678 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
679 		if (vma->vm_flags & VM_MIXEDMAP) {
680 			if (!pfn_valid(pfn))
681 				return NULL;
682 			goto out;
683 		} else {
684 			unsigned long off;
685 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
686 			if (pfn == vma->vm_pgoff + off)
687 				return NULL;
688 			if (!is_cow_mapping(vma->vm_flags))
689 				return NULL;
690 		}
691 	}
692 
693 	if (pmd_devmap(pmd))
694 		return NULL;
695 	if (is_huge_zero_pmd(pmd))
696 		return NULL;
697 	if (unlikely(pfn > highest_memmap_pfn))
698 		return NULL;
699 
700 	/*
701 	 * NOTE! We still have PageReserved() pages in the page tables.
702 	 * eg. VDSO mappings can cause them to exist.
703 	 */
704 out:
705 	return pfn_to_page(pfn);
706 }
707 #endif
708 
709 static void restore_exclusive_pte(struct vm_area_struct *vma,
710 				  struct page *page, unsigned long address,
711 				  pte_t *ptep)
712 {
713 	pte_t pte;
714 	swp_entry_t entry;
715 
716 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
717 	if (pte_swp_soft_dirty(*ptep))
718 		pte = pte_mksoft_dirty(pte);
719 
720 	entry = pte_to_swp_entry(*ptep);
721 	if (pte_swp_uffd_wp(*ptep))
722 		pte = pte_mkuffd_wp(pte);
723 	else if (is_writable_device_exclusive_entry(entry))
724 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
725 
726 	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
727 
728 	/*
729 	 * No need to take a page reference as one was already
730 	 * created when the swap entry was made.
731 	 */
732 	if (PageAnon(page))
733 		page_add_anon_rmap(page, vma, address, RMAP_NONE);
734 	else
735 		/*
736 		 * Currently device exclusive access only supports anonymous
737 		 * memory so the entry shouldn't point to a filebacked page.
738 		 */
739 		WARN_ON_ONCE(!PageAnon(page));
740 
741 	set_pte_at(vma->vm_mm, address, ptep, pte);
742 
743 	/*
744 	 * No need to invalidate - it was non-present before. However
745 	 * secondary CPUs may have mappings that need invalidating.
746 	 */
747 	update_mmu_cache(vma, address, ptep);
748 }
749 
750 /*
751  * Tries to restore an exclusive pte if the page lock can be acquired without
752  * sleeping.
753  */
754 static int
755 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
756 			unsigned long addr)
757 {
758 	swp_entry_t entry = pte_to_swp_entry(*src_pte);
759 	struct page *page = pfn_swap_entry_to_page(entry);
760 
761 	if (trylock_page(page)) {
762 		restore_exclusive_pte(vma, page, addr, src_pte);
763 		unlock_page(page);
764 		return 0;
765 	}
766 
767 	return -EBUSY;
768 }
769 
770 /*
771  * copy one vm_area from one task to the other. Assumes the page tables
772  * already present in the new task to be cleared in the whole range
773  * covered by this vma.
774  */
775 
776 static unsigned long
777 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
778 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
779 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
780 {
781 	unsigned long vm_flags = dst_vma->vm_flags;
782 	pte_t pte = *src_pte;
783 	struct page *page;
784 	swp_entry_t entry = pte_to_swp_entry(pte);
785 
786 	if (likely(!non_swap_entry(entry))) {
787 		if (swap_duplicate(entry) < 0)
788 			return -EIO;
789 
790 		/* make sure dst_mm is on swapoff's mmlist. */
791 		if (unlikely(list_empty(&dst_mm->mmlist))) {
792 			spin_lock(&mmlist_lock);
793 			if (list_empty(&dst_mm->mmlist))
794 				list_add(&dst_mm->mmlist,
795 						&src_mm->mmlist);
796 			spin_unlock(&mmlist_lock);
797 		}
798 		/* Mark the swap entry as shared. */
799 		if (pte_swp_exclusive(*src_pte)) {
800 			pte = pte_swp_clear_exclusive(*src_pte);
801 			set_pte_at(src_mm, addr, src_pte, pte);
802 		}
803 		rss[MM_SWAPENTS]++;
804 	} else if (is_migration_entry(entry)) {
805 		page = pfn_swap_entry_to_page(entry);
806 
807 		rss[mm_counter(page)]++;
808 
809 		if (!is_readable_migration_entry(entry) &&
810 				is_cow_mapping(vm_flags)) {
811 			/*
812 			 * COW mappings require pages in both parent and child
813 			 * to be set to read. A previously exclusive entry is
814 			 * now shared.
815 			 */
816 			entry = make_readable_migration_entry(
817 							swp_offset(entry));
818 			pte = swp_entry_to_pte(entry);
819 			if (pte_swp_soft_dirty(*src_pte))
820 				pte = pte_swp_mksoft_dirty(pte);
821 			if (pte_swp_uffd_wp(*src_pte))
822 				pte = pte_swp_mkuffd_wp(pte);
823 			set_pte_at(src_mm, addr, src_pte, pte);
824 		}
825 	} else if (is_device_private_entry(entry)) {
826 		page = pfn_swap_entry_to_page(entry);
827 
828 		/*
829 		 * Update rss count even for unaddressable pages, as
830 		 * they should treated just like normal pages in this
831 		 * respect.
832 		 *
833 		 * We will likely want to have some new rss counters
834 		 * for unaddressable pages, at some point. But for now
835 		 * keep things as they are.
836 		 */
837 		get_page(page);
838 		rss[mm_counter(page)]++;
839 		/* Cannot fail as these pages cannot get pinned. */
840 		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
841 
842 		/*
843 		 * We do not preserve soft-dirty information, because so
844 		 * far, checkpoint/restore is the only feature that
845 		 * requires that. And checkpoint/restore does not work
846 		 * when a device driver is involved (you cannot easily
847 		 * save and restore device driver state).
848 		 */
849 		if (is_writable_device_private_entry(entry) &&
850 		    is_cow_mapping(vm_flags)) {
851 			entry = make_readable_device_private_entry(
852 							swp_offset(entry));
853 			pte = swp_entry_to_pte(entry);
854 			if (pte_swp_uffd_wp(*src_pte))
855 				pte = pte_swp_mkuffd_wp(pte);
856 			set_pte_at(src_mm, addr, src_pte, pte);
857 		}
858 	} else if (is_device_exclusive_entry(entry)) {
859 		/*
860 		 * Make device exclusive entries present by restoring the
861 		 * original entry then copying as for a present pte. Device
862 		 * exclusive entries currently only support private writable
863 		 * (ie. COW) mappings.
864 		 */
865 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
866 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
867 			return -EBUSY;
868 		return -ENOENT;
869 	} else if (is_pte_marker_entry(entry)) {
870 		/*
871 		 * We're copying the pgtable should only because dst_vma has
872 		 * uffd-wp enabled, do sanity check.
873 		 */
874 		WARN_ON_ONCE(!userfaultfd_wp(dst_vma));
875 		set_pte_at(dst_mm, addr, dst_pte, pte);
876 		return 0;
877 	}
878 	if (!userfaultfd_wp(dst_vma))
879 		pte = pte_swp_clear_uffd_wp(pte);
880 	set_pte_at(dst_mm, addr, dst_pte, pte);
881 	return 0;
882 }
883 
884 /*
885  * Copy a present and normal page.
886  *
887  * NOTE! The usual case is that this isn't required;
888  * instead, the caller can just increase the page refcount
889  * and re-use the pte the traditional way.
890  *
891  * And if we need a pre-allocated page but don't yet have
892  * one, return a negative error to let the preallocation
893  * code know so that it can do so outside the page table
894  * lock.
895  */
896 static inline int
897 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
898 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
899 		  struct page **prealloc, struct page *page)
900 {
901 	struct page *new_page;
902 	pte_t pte;
903 
904 	new_page = *prealloc;
905 	if (!new_page)
906 		return -EAGAIN;
907 
908 	/*
909 	 * We have a prealloc page, all good!  Take it
910 	 * over and copy the page & arm it.
911 	 */
912 	*prealloc = NULL;
913 	copy_user_highpage(new_page, page, addr, src_vma);
914 	__SetPageUptodate(new_page);
915 	page_add_new_anon_rmap(new_page, dst_vma, addr);
916 	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
917 	rss[mm_counter(new_page)]++;
918 
919 	/* All done, just insert the new page copy in the child */
920 	pte = mk_pte(new_page, dst_vma->vm_page_prot);
921 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
922 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
923 		/* Uffd-wp needs to be delivered to dest pte as well */
924 		pte = pte_wrprotect(pte_mkuffd_wp(pte));
925 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
926 	return 0;
927 }
928 
929 /*
930  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
931  * is required to copy this pte.
932  */
933 static inline int
934 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
935 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
936 		 struct page **prealloc)
937 {
938 	struct mm_struct *src_mm = src_vma->vm_mm;
939 	unsigned long vm_flags = src_vma->vm_flags;
940 	pte_t pte = *src_pte;
941 	struct page *page;
942 
943 	page = vm_normal_page(src_vma, addr, pte);
944 	if (page && PageAnon(page)) {
945 		/*
946 		 * If this page may have been pinned by the parent process,
947 		 * copy the page immediately for the child so that we'll always
948 		 * guarantee the pinned page won't be randomly replaced in the
949 		 * future.
950 		 */
951 		get_page(page);
952 		if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
953 			/* Page maybe pinned, we have to copy. */
954 			put_page(page);
955 			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
956 						 addr, rss, prealloc, page);
957 		}
958 		rss[mm_counter(page)]++;
959 	} else if (page) {
960 		get_page(page);
961 		page_dup_file_rmap(page, false);
962 		rss[mm_counter(page)]++;
963 	}
964 
965 	/*
966 	 * If it's a COW mapping, write protect it both
967 	 * in the parent and the child
968 	 */
969 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
970 		ptep_set_wrprotect(src_mm, addr, src_pte);
971 		pte = pte_wrprotect(pte);
972 	}
973 	VM_BUG_ON(page && PageAnon(page) && PageAnonExclusive(page));
974 
975 	/*
976 	 * If it's a shared mapping, mark it clean in
977 	 * the child
978 	 */
979 	if (vm_flags & VM_SHARED)
980 		pte = pte_mkclean(pte);
981 	pte = pte_mkold(pte);
982 
983 	if (!userfaultfd_wp(dst_vma))
984 		pte = pte_clear_uffd_wp(pte);
985 
986 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
987 	return 0;
988 }
989 
990 static inline struct page *
991 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
992 		   unsigned long addr)
993 {
994 	struct page *new_page;
995 
996 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
997 	if (!new_page)
998 		return NULL;
999 
1000 	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
1001 		put_page(new_page);
1002 		return NULL;
1003 	}
1004 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
1005 
1006 	return new_page;
1007 }
1008 
1009 static int
1010 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1011 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1012 	       unsigned long end)
1013 {
1014 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1015 	struct mm_struct *src_mm = src_vma->vm_mm;
1016 	pte_t *orig_src_pte, *orig_dst_pte;
1017 	pte_t *src_pte, *dst_pte;
1018 	spinlock_t *src_ptl, *dst_ptl;
1019 	int progress, ret = 0;
1020 	int rss[NR_MM_COUNTERS];
1021 	swp_entry_t entry = (swp_entry_t){0};
1022 	struct page *prealloc = NULL;
1023 
1024 again:
1025 	progress = 0;
1026 	init_rss_vec(rss);
1027 
1028 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1029 	if (!dst_pte) {
1030 		ret = -ENOMEM;
1031 		goto out;
1032 	}
1033 	src_pte = pte_offset_map(src_pmd, addr);
1034 	src_ptl = pte_lockptr(src_mm, src_pmd);
1035 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1036 	orig_src_pte = src_pte;
1037 	orig_dst_pte = dst_pte;
1038 	arch_enter_lazy_mmu_mode();
1039 
1040 	do {
1041 		/*
1042 		 * We are holding two locks at this point - either of them
1043 		 * could generate latencies in another task on another CPU.
1044 		 */
1045 		if (progress >= 32) {
1046 			progress = 0;
1047 			if (need_resched() ||
1048 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1049 				break;
1050 		}
1051 		if (pte_none(*src_pte)) {
1052 			progress++;
1053 			continue;
1054 		}
1055 		if (unlikely(!pte_present(*src_pte))) {
1056 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1057 						  dst_pte, src_pte,
1058 						  dst_vma, src_vma,
1059 						  addr, rss);
1060 			if (ret == -EIO) {
1061 				entry = pte_to_swp_entry(*src_pte);
1062 				break;
1063 			} else if (ret == -EBUSY) {
1064 				break;
1065 			} else if (!ret) {
1066 				progress += 8;
1067 				continue;
1068 			}
1069 
1070 			/*
1071 			 * Device exclusive entry restored, continue by copying
1072 			 * the now present pte.
1073 			 */
1074 			WARN_ON_ONCE(ret != -ENOENT);
1075 		}
1076 		/* copy_present_pte() will clear `*prealloc' if consumed */
1077 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1078 				       addr, rss, &prealloc);
1079 		/*
1080 		 * If we need a pre-allocated page for this pte, drop the
1081 		 * locks, allocate, and try again.
1082 		 */
1083 		if (unlikely(ret == -EAGAIN))
1084 			break;
1085 		if (unlikely(prealloc)) {
1086 			/*
1087 			 * pre-alloc page cannot be reused by next time so as
1088 			 * to strictly follow mempolicy (e.g., alloc_page_vma()
1089 			 * will allocate page according to address).  This
1090 			 * could only happen if one pinned pte changed.
1091 			 */
1092 			put_page(prealloc);
1093 			prealloc = NULL;
1094 		}
1095 		progress += 8;
1096 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1097 
1098 	arch_leave_lazy_mmu_mode();
1099 	spin_unlock(src_ptl);
1100 	pte_unmap(orig_src_pte);
1101 	add_mm_rss_vec(dst_mm, rss);
1102 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1103 	cond_resched();
1104 
1105 	if (ret == -EIO) {
1106 		VM_WARN_ON_ONCE(!entry.val);
1107 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1108 			ret = -ENOMEM;
1109 			goto out;
1110 		}
1111 		entry.val = 0;
1112 	} else if (ret == -EBUSY) {
1113 		goto out;
1114 	} else if (ret ==  -EAGAIN) {
1115 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1116 		if (!prealloc)
1117 			return -ENOMEM;
1118 	} else if (ret) {
1119 		VM_WARN_ON_ONCE(1);
1120 	}
1121 
1122 	/* We've captured and resolved the error. Reset, try again. */
1123 	ret = 0;
1124 
1125 	if (addr != end)
1126 		goto again;
1127 out:
1128 	if (unlikely(prealloc))
1129 		put_page(prealloc);
1130 	return ret;
1131 }
1132 
1133 static inline int
1134 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1135 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1136 	       unsigned long end)
1137 {
1138 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1139 	struct mm_struct *src_mm = src_vma->vm_mm;
1140 	pmd_t *src_pmd, *dst_pmd;
1141 	unsigned long next;
1142 
1143 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1144 	if (!dst_pmd)
1145 		return -ENOMEM;
1146 	src_pmd = pmd_offset(src_pud, addr);
1147 	do {
1148 		next = pmd_addr_end(addr, end);
1149 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1150 			|| pmd_devmap(*src_pmd)) {
1151 			int err;
1152 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1153 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1154 					    addr, dst_vma, src_vma);
1155 			if (err == -ENOMEM)
1156 				return -ENOMEM;
1157 			if (!err)
1158 				continue;
1159 			/* fall through */
1160 		}
1161 		if (pmd_none_or_clear_bad(src_pmd))
1162 			continue;
1163 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1164 				   addr, next))
1165 			return -ENOMEM;
1166 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1167 	return 0;
1168 }
1169 
1170 static inline int
1171 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1172 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1173 	       unsigned long end)
1174 {
1175 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1176 	struct mm_struct *src_mm = src_vma->vm_mm;
1177 	pud_t *src_pud, *dst_pud;
1178 	unsigned long next;
1179 
1180 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1181 	if (!dst_pud)
1182 		return -ENOMEM;
1183 	src_pud = pud_offset(src_p4d, addr);
1184 	do {
1185 		next = pud_addr_end(addr, end);
1186 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1187 			int err;
1188 
1189 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1190 			err = copy_huge_pud(dst_mm, src_mm,
1191 					    dst_pud, src_pud, addr, src_vma);
1192 			if (err == -ENOMEM)
1193 				return -ENOMEM;
1194 			if (!err)
1195 				continue;
1196 			/* fall through */
1197 		}
1198 		if (pud_none_or_clear_bad(src_pud))
1199 			continue;
1200 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1201 				   addr, next))
1202 			return -ENOMEM;
1203 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1204 	return 0;
1205 }
1206 
1207 static inline int
1208 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1209 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1210 	       unsigned long end)
1211 {
1212 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1213 	p4d_t *src_p4d, *dst_p4d;
1214 	unsigned long next;
1215 
1216 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1217 	if (!dst_p4d)
1218 		return -ENOMEM;
1219 	src_p4d = p4d_offset(src_pgd, addr);
1220 	do {
1221 		next = p4d_addr_end(addr, end);
1222 		if (p4d_none_or_clear_bad(src_p4d))
1223 			continue;
1224 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1225 				   addr, next))
1226 			return -ENOMEM;
1227 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1228 	return 0;
1229 }
1230 
1231 /*
1232  * Return true if the vma needs to copy the pgtable during this fork().  Return
1233  * false when we can speed up fork() by allowing lazy page faults later until
1234  * when the child accesses the memory range.
1235  */
1236 static bool
1237 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1238 {
1239 	/*
1240 	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1241 	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
1242 	 * contains uffd-wp protection information, that's something we can't
1243 	 * retrieve from page cache, and skip copying will lose those info.
1244 	 */
1245 	if (userfaultfd_wp(dst_vma))
1246 		return true;
1247 
1248 	if (src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP))
1249 		return true;
1250 
1251 	if (src_vma->anon_vma)
1252 		return true;
1253 
1254 	/*
1255 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1256 	 * becomes much lighter when there are big shared or private readonly
1257 	 * mappings. The tradeoff is that copy_page_range is more efficient
1258 	 * than faulting.
1259 	 */
1260 	return false;
1261 }
1262 
1263 int
1264 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1265 {
1266 	pgd_t *src_pgd, *dst_pgd;
1267 	unsigned long next;
1268 	unsigned long addr = src_vma->vm_start;
1269 	unsigned long end = src_vma->vm_end;
1270 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1271 	struct mm_struct *src_mm = src_vma->vm_mm;
1272 	struct mmu_notifier_range range;
1273 	bool is_cow;
1274 	int ret;
1275 
1276 	if (!vma_needs_copy(dst_vma, src_vma))
1277 		return 0;
1278 
1279 	if (is_vm_hugetlb_page(src_vma))
1280 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1281 
1282 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1283 		/*
1284 		 * We do not free on error cases below as remove_vma
1285 		 * gets called on error from higher level routine
1286 		 */
1287 		ret = track_pfn_copy(src_vma);
1288 		if (ret)
1289 			return ret;
1290 	}
1291 
1292 	/*
1293 	 * We need to invalidate the secondary MMU mappings only when
1294 	 * there could be a permission downgrade on the ptes of the
1295 	 * parent mm. And a permission downgrade will only happen if
1296 	 * is_cow_mapping() returns true.
1297 	 */
1298 	is_cow = is_cow_mapping(src_vma->vm_flags);
1299 
1300 	if (is_cow) {
1301 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1302 					0, src_vma, src_mm, addr, end);
1303 		mmu_notifier_invalidate_range_start(&range);
1304 		/*
1305 		 * Disabling preemption is not needed for the write side, as
1306 		 * the read side doesn't spin, but goes to the mmap_lock.
1307 		 *
1308 		 * Use the raw variant of the seqcount_t write API to avoid
1309 		 * lockdep complaining about preemptibility.
1310 		 */
1311 		mmap_assert_write_locked(src_mm);
1312 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1313 	}
1314 
1315 	ret = 0;
1316 	dst_pgd = pgd_offset(dst_mm, addr);
1317 	src_pgd = pgd_offset(src_mm, addr);
1318 	do {
1319 		next = pgd_addr_end(addr, end);
1320 		if (pgd_none_or_clear_bad(src_pgd))
1321 			continue;
1322 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1323 					    addr, next))) {
1324 			ret = -ENOMEM;
1325 			break;
1326 		}
1327 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1328 
1329 	if (is_cow) {
1330 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1331 		mmu_notifier_invalidate_range_end(&range);
1332 	}
1333 	return ret;
1334 }
1335 
1336 /*
1337  * Parameter block passed down to zap_pte_range in exceptional cases.
1338  */
1339 struct zap_details {
1340 	struct folio *single_folio;	/* Locked folio to be unmapped */
1341 	bool even_cows;			/* Zap COWed private pages too? */
1342 	zap_flags_t zap_flags;		/* Extra flags for zapping */
1343 };
1344 
1345 /* Whether we should zap all COWed (private) pages too */
1346 static inline bool should_zap_cows(struct zap_details *details)
1347 {
1348 	/* By default, zap all pages */
1349 	if (!details)
1350 		return true;
1351 
1352 	/* Or, we zap COWed pages only if the caller wants to */
1353 	return details->even_cows;
1354 }
1355 
1356 /* Decides whether we should zap this page with the page pointer specified */
1357 static inline bool should_zap_page(struct zap_details *details, struct page *page)
1358 {
1359 	/* If we can make a decision without *page.. */
1360 	if (should_zap_cows(details))
1361 		return true;
1362 
1363 	/* E.g. the caller passes NULL for the case of a zero page */
1364 	if (!page)
1365 		return true;
1366 
1367 	/* Otherwise we should only zap non-anon pages */
1368 	return !PageAnon(page);
1369 }
1370 
1371 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
1372 {
1373 	if (!details)
1374 		return false;
1375 
1376 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1377 }
1378 
1379 /*
1380  * This function makes sure that we'll replace the none pte with an uffd-wp
1381  * swap special pte marker when necessary. Must be with the pgtable lock held.
1382  */
1383 static inline void
1384 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1385 			      unsigned long addr, pte_t *pte,
1386 			      struct zap_details *details, pte_t pteval)
1387 {
1388 	if (zap_drop_file_uffd_wp(details))
1389 		return;
1390 
1391 	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
1392 }
1393 
1394 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1395 				struct vm_area_struct *vma, pmd_t *pmd,
1396 				unsigned long addr, unsigned long end,
1397 				struct zap_details *details)
1398 {
1399 	struct mm_struct *mm = tlb->mm;
1400 	int force_flush = 0;
1401 	int rss[NR_MM_COUNTERS];
1402 	spinlock_t *ptl;
1403 	pte_t *start_pte;
1404 	pte_t *pte;
1405 	swp_entry_t entry;
1406 
1407 	tlb_change_page_size(tlb, PAGE_SIZE);
1408 again:
1409 	init_rss_vec(rss);
1410 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1411 	pte = start_pte;
1412 	flush_tlb_batched_pending(mm);
1413 	arch_enter_lazy_mmu_mode();
1414 	do {
1415 		pte_t ptent = *pte;
1416 		struct page *page;
1417 
1418 		if (pte_none(ptent))
1419 			continue;
1420 
1421 		if (need_resched())
1422 			break;
1423 
1424 		if (pte_present(ptent)) {
1425 			page = vm_normal_page(vma, addr, ptent);
1426 			if (unlikely(!should_zap_page(details, page)))
1427 				continue;
1428 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1429 							tlb->fullmm);
1430 			tlb_remove_tlb_entry(tlb, pte, addr);
1431 			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
1432 						      ptent);
1433 			if (unlikely(!page))
1434 				continue;
1435 
1436 			if (!PageAnon(page)) {
1437 				if (pte_dirty(ptent)) {
1438 					force_flush = 1;
1439 					set_page_dirty(page);
1440 				}
1441 				if (pte_young(ptent) &&
1442 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1443 					mark_page_accessed(page);
1444 			}
1445 			rss[mm_counter(page)]--;
1446 			page_remove_rmap(page, vma, false);
1447 			if (unlikely(page_mapcount(page) < 0))
1448 				print_bad_pte(vma, addr, ptent, page);
1449 			if (unlikely(__tlb_remove_page(tlb, page))) {
1450 				force_flush = 1;
1451 				addr += PAGE_SIZE;
1452 				break;
1453 			}
1454 			continue;
1455 		}
1456 
1457 		entry = pte_to_swp_entry(ptent);
1458 		if (is_device_private_entry(entry) ||
1459 		    is_device_exclusive_entry(entry)) {
1460 			page = pfn_swap_entry_to_page(entry);
1461 			if (unlikely(!should_zap_page(details, page)))
1462 				continue;
1463 			/*
1464 			 * Both device private/exclusive mappings should only
1465 			 * work with anonymous page so far, so we don't need to
1466 			 * consider uffd-wp bit when zap. For more information,
1467 			 * see zap_install_uffd_wp_if_needed().
1468 			 */
1469 			WARN_ON_ONCE(!vma_is_anonymous(vma));
1470 			rss[mm_counter(page)]--;
1471 			if (is_device_private_entry(entry))
1472 				page_remove_rmap(page, vma, false);
1473 			put_page(page);
1474 		} else if (!non_swap_entry(entry)) {
1475 			/* Genuine swap entry, hence a private anon page */
1476 			if (!should_zap_cows(details))
1477 				continue;
1478 			rss[MM_SWAPENTS]--;
1479 			if (unlikely(!free_swap_and_cache(entry)))
1480 				print_bad_pte(vma, addr, ptent, NULL);
1481 		} else if (is_migration_entry(entry)) {
1482 			page = pfn_swap_entry_to_page(entry);
1483 			if (!should_zap_page(details, page))
1484 				continue;
1485 			rss[mm_counter(page)]--;
1486 		} else if (pte_marker_entry_uffd_wp(entry)) {
1487 			/* Only drop the uffd-wp marker if explicitly requested */
1488 			if (!zap_drop_file_uffd_wp(details))
1489 				continue;
1490 		} else if (is_hwpoison_entry(entry) ||
1491 			   is_swapin_error_entry(entry)) {
1492 			if (!should_zap_cows(details))
1493 				continue;
1494 		} else {
1495 			/* We should have covered all the swap entry types */
1496 			WARN_ON_ONCE(1);
1497 		}
1498 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1499 		zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
1500 	} while (pte++, addr += PAGE_SIZE, addr != end);
1501 
1502 	add_mm_rss_vec(mm, rss);
1503 	arch_leave_lazy_mmu_mode();
1504 
1505 	/* Do the actual TLB flush before dropping ptl */
1506 	if (force_flush)
1507 		tlb_flush_mmu_tlbonly(tlb);
1508 	pte_unmap_unlock(start_pte, ptl);
1509 
1510 	/*
1511 	 * If we forced a TLB flush (either due to running out of
1512 	 * batch buffers or because we needed to flush dirty TLB
1513 	 * entries before releasing the ptl), free the batched
1514 	 * memory too. Restart if we didn't do everything.
1515 	 */
1516 	if (force_flush) {
1517 		force_flush = 0;
1518 		tlb_flush_mmu(tlb);
1519 	}
1520 
1521 	if (addr != end) {
1522 		cond_resched();
1523 		goto again;
1524 	}
1525 
1526 	return addr;
1527 }
1528 
1529 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1530 				struct vm_area_struct *vma, pud_t *pud,
1531 				unsigned long addr, unsigned long end,
1532 				struct zap_details *details)
1533 {
1534 	pmd_t *pmd;
1535 	unsigned long next;
1536 
1537 	pmd = pmd_offset(pud, addr);
1538 	do {
1539 		next = pmd_addr_end(addr, end);
1540 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1541 			if (next - addr != HPAGE_PMD_SIZE)
1542 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1543 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1544 				goto next;
1545 			/* fall through */
1546 		} else if (details && details->single_folio &&
1547 			   folio_test_pmd_mappable(details->single_folio) &&
1548 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1549 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1550 			/*
1551 			 * Take and drop THP pmd lock so that we cannot return
1552 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1553 			 * but not yet decremented compound_mapcount().
1554 			 */
1555 			spin_unlock(ptl);
1556 		}
1557 
1558 		/*
1559 		 * Here there can be other concurrent MADV_DONTNEED or
1560 		 * trans huge page faults running, and if the pmd is
1561 		 * none or trans huge it can change under us. This is
1562 		 * because MADV_DONTNEED holds the mmap_lock in read
1563 		 * mode.
1564 		 */
1565 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1566 			goto next;
1567 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1568 next:
1569 		cond_resched();
1570 	} while (pmd++, addr = next, addr != end);
1571 
1572 	return addr;
1573 }
1574 
1575 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1576 				struct vm_area_struct *vma, p4d_t *p4d,
1577 				unsigned long addr, unsigned long end,
1578 				struct zap_details *details)
1579 {
1580 	pud_t *pud;
1581 	unsigned long next;
1582 
1583 	pud = pud_offset(p4d, addr);
1584 	do {
1585 		next = pud_addr_end(addr, end);
1586 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1587 			if (next - addr != HPAGE_PUD_SIZE) {
1588 				mmap_assert_locked(tlb->mm);
1589 				split_huge_pud(vma, pud, addr);
1590 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1591 				goto next;
1592 			/* fall through */
1593 		}
1594 		if (pud_none_or_clear_bad(pud))
1595 			continue;
1596 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1597 next:
1598 		cond_resched();
1599 	} while (pud++, addr = next, addr != end);
1600 
1601 	return addr;
1602 }
1603 
1604 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1605 				struct vm_area_struct *vma, pgd_t *pgd,
1606 				unsigned long addr, unsigned long end,
1607 				struct zap_details *details)
1608 {
1609 	p4d_t *p4d;
1610 	unsigned long next;
1611 
1612 	p4d = p4d_offset(pgd, addr);
1613 	do {
1614 		next = p4d_addr_end(addr, end);
1615 		if (p4d_none_or_clear_bad(p4d))
1616 			continue;
1617 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1618 	} while (p4d++, addr = next, addr != end);
1619 
1620 	return addr;
1621 }
1622 
1623 void unmap_page_range(struct mmu_gather *tlb,
1624 			     struct vm_area_struct *vma,
1625 			     unsigned long addr, unsigned long end,
1626 			     struct zap_details *details)
1627 {
1628 	pgd_t *pgd;
1629 	unsigned long next;
1630 
1631 	BUG_ON(addr >= end);
1632 	tlb_start_vma(tlb, vma);
1633 	pgd = pgd_offset(vma->vm_mm, addr);
1634 	do {
1635 		next = pgd_addr_end(addr, end);
1636 		if (pgd_none_or_clear_bad(pgd))
1637 			continue;
1638 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1639 	} while (pgd++, addr = next, addr != end);
1640 	tlb_end_vma(tlb, vma);
1641 }
1642 
1643 
1644 static void unmap_single_vma(struct mmu_gather *tlb,
1645 		struct vm_area_struct *vma, unsigned long start_addr,
1646 		unsigned long end_addr,
1647 		struct zap_details *details)
1648 {
1649 	unsigned long start = max(vma->vm_start, start_addr);
1650 	unsigned long end;
1651 
1652 	if (start >= vma->vm_end)
1653 		return;
1654 	end = min(vma->vm_end, end_addr);
1655 	if (end <= vma->vm_start)
1656 		return;
1657 
1658 	if (vma->vm_file)
1659 		uprobe_munmap(vma, start, end);
1660 
1661 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1662 		untrack_pfn(vma, 0, 0);
1663 
1664 	if (start != end) {
1665 		if (unlikely(is_vm_hugetlb_page(vma))) {
1666 			/*
1667 			 * It is undesirable to test vma->vm_file as it
1668 			 * should be non-null for valid hugetlb area.
1669 			 * However, vm_file will be NULL in the error
1670 			 * cleanup path of mmap_region. When
1671 			 * hugetlbfs ->mmap method fails,
1672 			 * mmap_region() nullifies vma->vm_file
1673 			 * before calling this function to clean up.
1674 			 * Since no pte has actually been setup, it is
1675 			 * safe to do nothing in this case.
1676 			 */
1677 			if (vma->vm_file) {
1678 				zap_flags_t zap_flags = details ?
1679 				    details->zap_flags : 0;
1680 				i_mmap_lock_write(vma->vm_file->f_mapping);
1681 				__unmap_hugepage_range_final(tlb, vma, start, end,
1682 							     NULL, zap_flags);
1683 				i_mmap_unlock_write(vma->vm_file->f_mapping);
1684 			}
1685 		} else
1686 			unmap_page_range(tlb, vma, start, end, details);
1687 	}
1688 }
1689 
1690 /**
1691  * unmap_vmas - unmap a range of memory covered by a list of vma's
1692  * @tlb: address of the caller's struct mmu_gather
1693  * @vma: the starting vma
1694  * @start_addr: virtual address at which to start unmapping
1695  * @end_addr: virtual address at which to end unmapping
1696  *
1697  * Unmap all pages in the vma list.
1698  *
1699  * Only addresses between `start' and `end' will be unmapped.
1700  *
1701  * The VMA list must be sorted in ascending virtual address order.
1702  *
1703  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1704  * range after unmap_vmas() returns.  So the only responsibility here is to
1705  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1706  * drops the lock and schedules.
1707  */
1708 void unmap_vmas(struct mmu_gather *tlb,
1709 		struct vm_area_struct *vma, unsigned long start_addr,
1710 		unsigned long end_addr)
1711 {
1712 	struct mmu_notifier_range range;
1713 	struct zap_details details = {
1714 		.zap_flags = ZAP_FLAG_DROP_MARKER,
1715 		/* Careful - we need to zap private pages too! */
1716 		.even_cows = true,
1717 	};
1718 
1719 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1720 				start_addr, end_addr);
1721 	mmu_notifier_invalidate_range_start(&range);
1722 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1723 		unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
1724 	mmu_notifier_invalidate_range_end(&range);
1725 }
1726 
1727 /**
1728  * zap_page_range - remove user pages in a given range
1729  * @vma: vm_area_struct holding the applicable pages
1730  * @start: starting address of pages to zap
1731  * @size: number of bytes to zap
1732  *
1733  * Caller must protect the VMA list
1734  */
1735 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1736 		unsigned long size)
1737 {
1738 	struct mmu_notifier_range range;
1739 	struct mmu_gather tlb;
1740 
1741 	lru_add_drain();
1742 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1743 				start, start + size);
1744 	tlb_gather_mmu(&tlb, vma->vm_mm);
1745 	update_hiwater_rss(vma->vm_mm);
1746 	mmu_notifier_invalidate_range_start(&range);
1747 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1748 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1749 	mmu_notifier_invalidate_range_end(&range);
1750 	tlb_finish_mmu(&tlb);
1751 }
1752 
1753 /**
1754  * zap_page_range_single - remove user pages in a given range
1755  * @vma: vm_area_struct holding the applicable pages
1756  * @address: starting address of pages to zap
1757  * @size: number of bytes to zap
1758  * @details: details of shared cache invalidation
1759  *
1760  * The range must fit into one VMA.
1761  */
1762 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1763 		unsigned long size, struct zap_details *details)
1764 {
1765 	struct mmu_notifier_range range;
1766 	struct mmu_gather tlb;
1767 
1768 	lru_add_drain();
1769 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1770 				address, address + size);
1771 	tlb_gather_mmu(&tlb, vma->vm_mm);
1772 	update_hiwater_rss(vma->vm_mm);
1773 	mmu_notifier_invalidate_range_start(&range);
1774 	unmap_single_vma(&tlb, vma, address, range.end, details);
1775 	mmu_notifier_invalidate_range_end(&range);
1776 	tlb_finish_mmu(&tlb);
1777 }
1778 
1779 /**
1780  * zap_vma_ptes - remove ptes mapping the vma
1781  * @vma: vm_area_struct holding ptes to be zapped
1782  * @address: starting address of pages to zap
1783  * @size: number of bytes to zap
1784  *
1785  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1786  *
1787  * The entire address range must be fully contained within the vma.
1788  *
1789  */
1790 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1791 		unsigned long size)
1792 {
1793 	if (!range_in_vma(vma, address, address + size) ||
1794 	    		!(vma->vm_flags & VM_PFNMAP))
1795 		return;
1796 
1797 	zap_page_range_single(vma, address, size, NULL);
1798 }
1799 EXPORT_SYMBOL_GPL(zap_vma_ptes);
1800 
1801 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1802 {
1803 	pgd_t *pgd;
1804 	p4d_t *p4d;
1805 	pud_t *pud;
1806 	pmd_t *pmd;
1807 
1808 	pgd = pgd_offset(mm, addr);
1809 	p4d = p4d_alloc(mm, pgd, addr);
1810 	if (!p4d)
1811 		return NULL;
1812 	pud = pud_alloc(mm, p4d, addr);
1813 	if (!pud)
1814 		return NULL;
1815 	pmd = pmd_alloc(mm, pud, addr);
1816 	if (!pmd)
1817 		return NULL;
1818 
1819 	VM_BUG_ON(pmd_trans_huge(*pmd));
1820 	return pmd;
1821 }
1822 
1823 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1824 			spinlock_t **ptl)
1825 {
1826 	pmd_t *pmd = walk_to_pmd(mm, addr);
1827 
1828 	if (!pmd)
1829 		return NULL;
1830 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1831 }
1832 
1833 static int validate_page_before_insert(struct page *page)
1834 {
1835 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1836 		return -EINVAL;
1837 	flush_dcache_page(page);
1838 	return 0;
1839 }
1840 
1841 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
1842 			unsigned long addr, struct page *page, pgprot_t prot)
1843 {
1844 	if (!pte_none(*pte))
1845 		return -EBUSY;
1846 	/* Ok, finally just insert the thing.. */
1847 	get_page(page);
1848 	inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
1849 	page_add_file_rmap(page, vma, false);
1850 	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
1851 	return 0;
1852 }
1853 
1854 /*
1855  * This is the old fallback for page remapping.
1856  *
1857  * For historical reasons, it only allows reserved pages. Only
1858  * old drivers should use this, and they needed to mark their
1859  * pages reserved for the old functions anyway.
1860  */
1861 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1862 			struct page *page, pgprot_t prot)
1863 {
1864 	int retval;
1865 	pte_t *pte;
1866 	spinlock_t *ptl;
1867 
1868 	retval = validate_page_before_insert(page);
1869 	if (retval)
1870 		goto out;
1871 	retval = -ENOMEM;
1872 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
1873 	if (!pte)
1874 		goto out;
1875 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
1876 	pte_unmap_unlock(pte, ptl);
1877 out:
1878 	return retval;
1879 }
1880 
1881 #ifdef pte_index
1882 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
1883 			unsigned long addr, struct page *page, pgprot_t prot)
1884 {
1885 	int err;
1886 
1887 	if (!page_count(page))
1888 		return -EINVAL;
1889 	err = validate_page_before_insert(page);
1890 	if (err)
1891 		return err;
1892 	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
1893 }
1894 
1895 /* insert_pages() amortizes the cost of spinlock operations
1896  * when inserting pages in a loop. Arch *must* define pte_index.
1897  */
1898 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1899 			struct page **pages, unsigned long *num, pgprot_t prot)
1900 {
1901 	pmd_t *pmd = NULL;
1902 	pte_t *start_pte, *pte;
1903 	spinlock_t *pte_lock;
1904 	struct mm_struct *const mm = vma->vm_mm;
1905 	unsigned long curr_page_idx = 0;
1906 	unsigned long remaining_pages_total = *num;
1907 	unsigned long pages_to_write_in_pmd;
1908 	int ret;
1909 more:
1910 	ret = -EFAULT;
1911 	pmd = walk_to_pmd(mm, addr);
1912 	if (!pmd)
1913 		goto out;
1914 
1915 	pages_to_write_in_pmd = min_t(unsigned long,
1916 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1917 
1918 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1919 	ret = -ENOMEM;
1920 	if (pte_alloc(mm, pmd))
1921 		goto out;
1922 
1923 	while (pages_to_write_in_pmd) {
1924 		int pte_idx = 0;
1925 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1926 
1927 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1928 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1929 			int err = insert_page_in_batch_locked(vma, pte,
1930 				addr, pages[curr_page_idx], prot);
1931 			if (unlikely(err)) {
1932 				pte_unmap_unlock(start_pte, pte_lock);
1933 				ret = err;
1934 				remaining_pages_total -= pte_idx;
1935 				goto out;
1936 			}
1937 			addr += PAGE_SIZE;
1938 			++curr_page_idx;
1939 		}
1940 		pte_unmap_unlock(start_pte, pte_lock);
1941 		pages_to_write_in_pmd -= batch_size;
1942 		remaining_pages_total -= batch_size;
1943 	}
1944 	if (remaining_pages_total)
1945 		goto more;
1946 	ret = 0;
1947 out:
1948 	*num = remaining_pages_total;
1949 	return ret;
1950 }
1951 #endif  /* ifdef pte_index */
1952 
1953 /**
1954  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1955  * @vma: user vma to map to
1956  * @addr: target start user address of these pages
1957  * @pages: source kernel pages
1958  * @num: in: number of pages to map. out: number of pages that were *not*
1959  * mapped. (0 means all pages were successfully mapped).
1960  *
1961  * Preferred over vm_insert_page() when inserting multiple pages.
1962  *
1963  * In case of error, we may have mapped a subset of the provided
1964  * pages. It is the caller's responsibility to account for this case.
1965  *
1966  * The same restrictions apply as in vm_insert_page().
1967  */
1968 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1969 			struct page **pages, unsigned long *num)
1970 {
1971 #ifdef pte_index
1972 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1973 
1974 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1975 		return -EFAULT;
1976 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1977 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1978 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1979 		vma->vm_flags |= VM_MIXEDMAP;
1980 	}
1981 	/* Defer page refcount checking till we're about to map that page. */
1982 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1983 #else
1984 	unsigned long idx = 0, pgcount = *num;
1985 	int err = -EINVAL;
1986 
1987 	for (; idx < pgcount; ++idx) {
1988 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1989 		if (err)
1990 			break;
1991 	}
1992 	*num = pgcount - idx;
1993 	return err;
1994 #endif  /* ifdef pte_index */
1995 }
1996 EXPORT_SYMBOL(vm_insert_pages);
1997 
1998 /**
1999  * vm_insert_page - insert single page into user vma
2000  * @vma: user vma to map to
2001  * @addr: target user address of this page
2002  * @page: source kernel page
2003  *
2004  * This allows drivers to insert individual pages they've allocated
2005  * into a user vma.
2006  *
2007  * The page has to be a nice clean _individual_ kernel allocation.
2008  * If you allocate a compound page, you need to have marked it as
2009  * such (__GFP_COMP), or manually just split the page up yourself
2010  * (see split_page()).
2011  *
2012  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2013  * took an arbitrary page protection parameter. This doesn't allow
2014  * that. Your vma protection will have to be set up correctly, which
2015  * means that if you want a shared writable mapping, you'd better
2016  * ask for a shared writable mapping!
2017  *
2018  * The page does not need to be reserved.
2019  *
2020  * Usually this function is called from f_op->mmap() handler
2021  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2022  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2023  * function from other places, for example from page-fault handler.
2024  *
2025  * Return: %0 on success, negative error code otherwise.
2026  */
2027 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2028 			struct page *page)
2029 {
2030 	if (addr < vma->vm_start || addr >= vma->vm_end)
2031 		return -EFAULT;
2032 	if (!page_count(page))
2033 		return -EINVAL;
2034 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2035 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2036 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2037 		vma->vm_flags |= VM_MIXEDMAP;
2038 	}
2039 	return insert_page(vma, addr, page, vma->vm_page_prot);
2040 }
2041 EXPORT_SYMBOL(vm_insert_page);
2042 
2043 /*
2044  * __vm_map_pages - maps range of kernel pages into user vma
2045  * @vma: user vma to map to
2046  * @pages: pointer to array of source kernel pages
2047  * @num: number of pages in page array
2048  * @offset: user's requested vm_pgoff
2049  *
2050  * This allows drivers to map range of kernel pages into a user vma.
2051  *
2052  * Return: 0 on success and error code otherwise.
2053  */
2054 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2055 				unsigned long num, unsigned long offset)
2056 {
2057 	unsigned long count = vma_pages(vma);
2058 	unsigned long uaddr = vma->vm_start;
2059 	int ret, i;
2060 
2061 	/* Fail if the user requested offset is beyond the end of the object */
2062 	if (offset >= num)
2063 		return -ENXIO;
2064 
2065 	/* Fail if the user requested size exceeds available object size */
2066 	if (count > num - offset)
2067 		return -ENXIO;
2068 
2069 	for (i = 0; i < count; i++) {
2070 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2071 		if (ret < 0)
2072 			return ret;
2073 		uaddr += PAGE_SIZE;
2074 	}
2075 
2076 	return 0;
2077 }
2078 
2079 /**
2080  * vm_map_pages - maps range of kernel pages starts with non zero offset
2081  * @vma: user vma to map to
2082  * @pages: pointer to array of source kernel pages
2083  * @num: number of pages in page array
2084  *
2085  * Maps an object consisting of @num pages, catering for the user's
2086  * requested vm_pgoff
2087  *
2088  * If we fail to insert any page into the vma, the function will return
2089  * immediately leaving any previously inserted pages present.  Callers
2090  * from the mmap handler may immediately return the error as their caller
2091  * will destroy the vma, removing any successfully inserted pages. Other
2092  * callers should make their own arrangements for calling unmap_region().
2093  *
2094  * Context: Process context. Called by mmap handlers.
2095  * Return: 0 on success and error code otherwise.
2096  */
2097 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2098 				unsigned long num)
2099 {
2100 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2101 }
2102 EXPORT_SYMBOL(vm_map_pages);
2103 
2104 /**
2105  * vm_map_pages_zero - map range of kernel pages starts with zero offset
2106  * @vma: user vma to map to
2107  * @pages: pointer to array of source kernel pages
2108  * @num: number of pages in page array
2109  *
2110  * Similar to vm_map_pages(), except that it explicitly sets the offset
2111  * to 0. This function is intended for the drivers that did not consider
2112  * vm_pgoff.
2113  *
2114  * Context: Process context. Called by mmap handlers.
2115  * Return: 0 on success and error code otherwise.
2116  */
2117 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2118 				unsigned long num)
2119 {
2120 	return __vm_map_pages(vma, pages, num, 0);
2121 }
2122 EXPORT_SYMBOL(vm_map_pages_zero);
2123 
2124 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2125 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2126 {
2127 	struct mm_struct *mm = vma->vm_mm;
2128 	pte_t *pte, entry;
2129 	spinlock_t *ptl;
2130 
2131 	pte = get_locked_pte(mm, addr, &ptl);
2132 	if (!pte)
2133 		return VM_FAULT_OOM;
2134 	if (!pte_none(*pte)) {
2135 		if (mkwrite) {
2136 			/*
2137 			 * For read faults on private mappings the PFN passed
2138 			 * in may not match the PFN we have mapped if the
2139 			 * mapped PFN is a writeable COW page.  In the mkwrite
2140 			 * case we are creating a writable PTE for a shared
2141 			 * mapping and we expect the PFNs to match. If they
2142 			 * don't match, we are likely racing with block
2143 			 * allocation and mapping invalidation so just skip the
2144 			 * update.
2145 			 */
2146 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2147 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2148 				goto out_unlock;
2149 			}
2150 			entry = pte_mkyoung(*pte);
2151 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2152 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2153 				update_mmu_cache(vma, addr, pte);
2154 		}
2155 		goto out_unlock;
2156 	}
2157 
2158 	/* Ok, finally just insert the thing.. */
2159 	if (pfn_t_devmap(pfn))
2160 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2161 	else
2162 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2163 
2164 	if (mkwrite) {
2165 		entry = pte_mkyoung(entry);
2166 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2167 	}
2168 
2169 	set_pte_at(mm, addr, pte, entry);
2170 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2171 
2172 out_unlock:
2173 	pte_unmap_unlock(pte, ptl);
2174 	return VM_FAULT_NOPAGE;
2175 }
2176 
2177 /**
2178  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2179  * @vma: user vma to map to
2180  * @addr: target user address of this page
2181  * @pfn: source kernel pfn
2182  * @pgprot: pgprot flags for the inserted page
2183  *
2184  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2185  * to override pgprot on a per-page basis.
2186  *
2187  * This only makes sense for IO mappings, and it makes no sense for
2188  * COW mappings.  In general, using multiple vmas is preferable;
2189  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2190  * impractical.
2191  *
2192  * See vmf_insert_mixed_prot() for a discussion of the implication of using
2193  * a value of @pgprot different from that of @vma->vm_page_prot.
2194  *
2195  * Context: Process context.  May allocate using %GFP_KERNEL.
2196  * Return: vm_fault_t value.
2197  */
2198 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2199 			unsigned long pfn, pgprot_t pgprot)
2200 {
2201 	/*
2202 	 * Technically, architectures with pte_special can avoid all these
2203 	 * restrictions (same for remap_pfn_range).  However we would like
2204 	 * consistency in testing and feature parity among all, so we should
2205 	 * try to keep these invariants in place for everybody.
2206 	 */
2207 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2208 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2209 						(VM_PFNMAP|VM_MIXEDMAP));
2210 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2211 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2212 
2213 	if (addr < vma->vm_start || addr >= vma->vm_end)
2214 		return VM_FAULT_SIGBUS;
2215 
2216 	if (!pfn_modify_allowed(pfn, pgprot))
2217 		return VM_FAULT_SIGBUS;
2218 
2219 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2220 
2221 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2222 			false);
2223 }
2224 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2225 
2226 /**
2227  * vmf_insert_pfn - insert single pfn into user vma
2228  * @vma: user vma to map to
2229  * @addr: target user address of this page
2230  * @pfn: source kernel pfn
2231  *
2232  * Similar to vm_insert_page, this allows drivers to insert individual pages
2233  * they've allocated into a user vma. Same comments apply.
2234  *
2235  * This function should only be called from a vm_ops->fault handler, and
2236  * in that case the handler should return the result of this function.
2237  *
2238  * vma cannot be a COW mapping.
2239  *
2240  * As this is called only for pages that do not currently exist, we
2241  * do not need to flush old virtual caches or the TLB.
2242  *
2243  * Context: Process context.  May allocate using %GFP_KERNEL.
2244  * Return: vm_fault_t value.
2245  */
2246 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2247 			unsigned long pfn)
2248 {
2249 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2250 }
2251 EXPORT_SYMBOL(vmf_insert_pfn);
2252 
2253 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2254 {
2255 	/* these checks mirror the abort conditions in vm_normal_page */
2256 	if (vma->vm_flags & VM_MIXEDMAP)
2257 		return true;
2258 	if (pfn_t_devmap(pfn))
2259 		return true;
2260 	if (pfn_t_special(pfn))
2261 		return true;
2262 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2263 		return true;
2264 	return false;
2265 }
2266 
2267 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2268 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2269 		bool mkwrite)
2270 {
2271 	int err;
2272 
2273 	BUG_ON(!vm_mixed_ok(vma, pfn));
2274 
2275 	if (addr < vma->vm_start || addr >= vma->vm_end)
2276 		return VM_FAULT_SIGBUS;
2277 
2278 	track_pfn_insert(vma, &pgprot, pfn);
2279 
2280 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2281 		return VM_FAULT_SIGBUS;
2282 
2283 	/*
2284 	 * If we don't have pte special, then we have to use the pfn_valid()
2285 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2286 	 * refcount the page if pfn_valid is true (hence insert_page rather
2287 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2288 	 * without pte special, it would there be refcounted as a normal page.
2289 	 */
2290 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2291 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2292 		struct page *page;
2293 
2294 		/*
2295 		 * At this point we are committed to insert_page()
2296 		 * regardless of whether the caller specified flags that
2297 		 * result in pfn_t_has_page() == false.
2298 		 */
2299 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2300 		err = insert_page(vma, addr, page, pgprot);
2301 	} else {
2302 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2303 	}
2304 
2305 	if (err == -ENOMEM)
2306 		return VM_FAULT_OOM;
2307 	if (err < 0 && err != -EBUSY)
2308 		return VM_FAULT_SIGBUS;
2309 
2310 	return VM_FAULT_NOPAGE;
2311 }
2312 
2313 /**
2314  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2315  * @vma: user vma to map to
2316  * @addr: target user address of this page
2317  * @pfn: source kernel pfn
2318  * @pgprot: pgprot flags for the inserted page
2319  *
2320  * This is exactly like vmf_insert_mixed(), except that it allows drivers
2321  * to override pgprot on a per-page basis.
2322  *
2323  * Typically this function should be used by drivers to set caching- and
2324  * encryption bits different than those of @vma->vm_page_prot, because
2325  * the caching- or encryption mode may not be known at mmap() time.
2326  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2327  * to set caching and encryption bits for those vmas (except for COW pages).
2328  * This is ensured by core vm only modifying these page table entries using
2329  * functions that don't touch caching- or encryption bits, using pte_modify()
2330  * if needed. (See for example mprotect()).
2331  * Also when new page-table entries are created, this is only done using the
2332  * fault() callback, and never using the value of vma->vm_page_prot,
2333  * except for page-table entries that point to anonymous pages as the result
2334  * of COW.
2335  *
2336  * Context: Process context.  May allocate using %GFP_KERNEL.
2337  * Return: vm_fault_t value.
2338  */
2339 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2340 				 pfn_t pfn, pgprot_t pgprot)
2341 {
2342 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2343 }
2344 EXPORT_SYMBOL(vmf_insert_mixed_prot);
2345 
2346 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2347 		pfn_t pfn)
2348 {
2349 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2350 }
2351 EXPORT_SYMBOL(vmf_insert_mixed);
2352 
2353 /*
2354  *  If the insertion of PTE failed because someone else already added a
2355  *  different entry in the mean time, we treat that as success as we assume
2356  *  the same entry was actually inserted.
2357  */
2358 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2359 		unsigned long addr, pfn_t pfn)
2360 {
2361 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2362 }
2363 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2364 
2365 /*
2366  * maps a range of physical memory into the requested pages. the old
2367  * mappings are removed. any references to nonexistent pages results
2368  * in null mappings (currently treated as "copy-on-access")
2369  */
2370 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2371 			unsigned long addr, unsigned long end,
2372 			unsigned long pfn, pgprot_t prot)
2373 {
2374 	pte_t *pte, *mapped_pte;
2375 	spinlock_t *ptl;
2376 	int err = 0;
2377 
2378 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2379 	if (!pte)
2380 		return -ENOMEM;
2381 	arch_enter_lazy_mmu_mode();
2382 	do {
2383 		BUG_ON(!pte_none(*pte));
2384 		if (!pfn_modify_allowed(pfn, prot)) {
2385 			err = -EACCES;
2386 			break;
2387 		}
2388 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2389 		pfn++;
2390 	} while (pte++, addr += PAGE_SIZE, addr != end);
2391 	arch_leave_lazy_mmu_mode();
2392 	pte_unmap_unlock(mapped_pte, ptl);
2393 	return err;
2394 }
2395 
2396 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2397 			unsigned long addr, unsigned long end,
2398 			unsigned long pfn, pgprot_t prot)
2399 {
2400 	pmd_t *pmd;
2401 	unsigned long next;
2402 	int err;
2403 
2404 	pfn -= addr >> PAGE_SHIFT;
2405 	pmd = pmd_alloc(mm, pud, addr);
2406 	if (!pmd)
2407 		return -ENOMEM;
2408 	VM_BUG_ON(pmd_trans_huge(*pmd));
2409 	do {
2410 		next = pmd_addr_end(addr, end);
2411 		err = remap_pte_range(mm, pmd, addr, next,
2412 				pfn + (addr >> PAGE_SHIFT), prot);
2413 		if (err)
2414 			return err;
2415 	} while (pmd++, addr = next, addr != end);
2416 	return 0;
2417 }
2418 
2419 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2420 			unsigned long addr, unsigned long end,
2421 			unsigned long pfn, pgprot_t prot)
2422 {
2423 	pud_t *pud;
2424 	unsigned long next;
2425 	int err;
2426 
2427 	pfn -= addr >> PAGE_SHIFT;
2428 	pud = pud_alloc(mm, p4d, addr);
2429 	if (!pud)
2430 		return -ENOMEM;
2431 	do {
2432 		next = pud_addr_end(addr, end);
2433 		err = remap_pmd_range(mm, pud, addr, next,
2434 				pfn + (addr >> PAGE_SHIFT), prot);
2435 		if (err)
2436 			return err;
2437 	} while (pud++, addr = next, addr != end);
2438 	return 0;
2439 }
2440 
2441 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2442 			unsigned long addr, unsigned long end,
2443 			unsigned long pfn, pgprot_t prot)
2444 {
2445 	p4d_t *p4d;
2446 	unsigned long next;
2447 	int err;
2448 
2449 	pfn -= addr >> PAGE_SHIFT;
2450 	p4d = p4d_alloc(mm, pgd, addr);
2451 	if (!p4d)
2452 		return -ENOMEM;
2453 	do {
2454 		next = p4d_addr_end(addr, end);
2455 		err = remap_pud_range(mm, p4d, addr, next,
2456 				pfn + (addr >> PAGE_SHIFT), prot);
2457 		if (err)
2458 			return err;
2459 	} while (p4d++, addr = next, addr != end);
2460 	return 0;
2461 }
2462 
2463 /*
2464  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2465  * must have pre-validated the caching bits of the pgprot_t.
2466  */
2467 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2468 		unsigned long pfn, unsigned long size, pgprot_t prot)
2469 {
2470 	pgd_t *pgd;
2471 	unsigned long next;
2472 	unsigned long end = addr + PAGE_ALIGN(size);
2473 	struct mm_struct *mm = vma->vm_mm;
2474 	int err;
2475 
2476 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2477 		return -EINVAL;
2478 
2479 	/*
2480 	 * Physically remapped pages are special. Tell the
2481 	 * rest of the world about it:
2482 	 *   VM_IO tells people not to look at these pages
2483 	 *	(accesses can have side effects).
2484 	 *   VM_PFNMAP tells the core MM that the base pages are just
2485 	 *	raw PFN mappings, and do not have a "struct page" associated
2486 	 *	with them.
2487 	 *   VM_DONTEXPAND
2488 	 *      Disable vma merging and expanding with mremap().
2489 	 *   VM_DONTDUMP
2490 	 *      Omit vma from core dump, even when VM_IO turned off.
2491 	 *
2492 	 * There's a horrible special case to handle copy-on-write
2493 	 * behaviour that some programs depend on. We mark the "original"
2494 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2495 	 * See vm_normal_page() for details.
2496 	 */
2497 	if (is_cow_mapping(vma->vm_flags)) {
2498 		if (addr != vma->vm_start || end != vma->vm_end)
2499 			return -EINVAL;
2500 		vma->vm_pgoff = pfn;
2501 	}
2502 
2503 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2504 
2505 	BUG_ON(addr >= end);
2506 	pfn -= addr >> PAGE_SHIFT;
2507 	pgd = pgd_offset(mm, addr);
2508 	flush_cache_range(vma, addr, end);
2509 	do {
2510 		next = pgd_addr_end(addr, end);
2511 		err = remap_p4d_range(mm, pgd, addr, next,
2512 				pfn + (addr >> PAGE_SHIFT), prot);
2513 		if (err)
2514 			return err;
2515 	} while (pgd++, addr = next, addr != end);
2516 
2517 	return 0;
2518 }
2519 
2520 /**
2521  * remap_pfn_range - remap kernel memory to userspace
2522  * @vma: user vma to map to
2523  * @addr: target page aligned user address to start at
2524  * @pfn: page frame number of kernel physical memory address
2525  * @size: size of mapping area
2526  * @prot: page protection flags for this mapping
2527  *
2528  * Note: this is only safe if the mm semaphore is held when called.
2529  *
2530  * Return: %0 on success, negative error code otherwise.
2531  */
2532 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2533 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2534 {
2535 	int err;
2536 
2537 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2538 	if (err)
2539 		return -EINVAL;
2540 
2541 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2542 	if (err)
2543 		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
2544 	return err;
2545 }
2546 EXPORT_SYMBOL(remap_pfn_range);
2547 
2548 /**
2549  * vm_iomap_memory - remap memory to userspace
2550  * @vma: user vma to map to
2551  * @start: start of the physical memory to be mapped
2552  * @len: size of area
2553  *
2554  * This is a simplified io_remap_pfn_range() for common driver use. The
2555  * driver just needs to give us the physical memory range to be mapped,
2556  * we'll figure out the rest from the vma information.
2557  *
2558  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2559  * whatever write-combining details or similar.
2560  *
2561  * Return: %0 on success, negative error code otherwise.
2562  */
2563 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2564 {
2565 	unsigned long vm_len, pfn, pages;
2566 
2567 	/* Check that the physical memory area passed in looks valid */
2568 	if (start + len < start)
2569 		return -EINVAL;
2570 	/*
2571 	 * You *really* shouldn't map things that aren't page-aligned,
2572 	 * but we've historically allowed it because IO memory might
2573 	 * just have smaller alignment.
2574 	 */
2575 	len += start & ~PAGE_MASK;
2576 	pfn = start >> PAGE_SHIFT;
2577 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2578 	if (pfn + pages < pfn)
2579 		return -EINVAL;
2580 
2581 	/* We start the mapping 'vm_pgoff' pages into the area */
2582 	if (vma->vm_pgoff > pages)
2583 		return -EINVAL;
2584 	pfn += vma->vm_pgoff;
2585 	pages -= vma->vm_pgoff;
2586 
2587 	/* Can we fit all of the mapping? */
2588 	vm_len = vma->vm_end - vma->vm_start;
2589 	if (vm_len >> PAGE_SHIFT > pages)
2590 		return -EINVAL;
2591 
2592 	/* Ok, let it rip */
2593 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2594 }
2595 EXPORT_SYMBOL(vm_iomap_memory);
2596 
2597 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2598 				     unsigned long addr, unsigned long end,
2599 				     pte_fn_t fn, void *data, bool create,
2600 				     pgtbl_mod_mask *mask)
2601 {
2602 	pte_t *pte, *mapped_pte;
2603 	int err = 0;
2604 	spinlock_t *ptl;
2605 
2606 	if (create) {
2607 		mapped_pte = pte = (mm == &init_mm) ?
2608 			pte_alloc_kernel_track(pmd, addr, mask) :
2609 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2610 		if (!pte)
2611 			return -ENOMEM;
2612 	} else {
2613 		mapped_pte = pte = (mm == &init_mm) ?
2614 			pte_offset_kernel(pmd, addr) :
2615 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2616 	}
2617 
2618 	BUG_ON(pmd_huge(*pmd));
2619 
2620 	arch_enter_lazy_mmu_mode();
2621 
2622 	if (fn) {
2623 		do {
2624 			if (create || !pte_none(*pte)) {
2625 				err = fn(pte++, addr, data);
2626 				if (err)
2627 					break;
2628 			}
2629 		} while (addr += PAGE_SIZE, addr != end);
2630 	}
2631 	*mask |= PGTBL_PTE_MODIFIED;
2632 
2633 	arch_leave_lazy_mmu_mode();
2634 
2635 	if (mm != &init_mm)
2636 		pte_unmap_unlock(mapped_pte, ptl);
2637 	return err;
2638 }
2639 
2640 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2641 				     unsigned long addr, unsigned long end,
2642 				     pte_fn_t fn, void *data, bool create,
2643 				     pgtbl_mod_mask *mask)
2644 {
2645 	pmd_t *pmd;
2646 	unsigned long next;
2647 	int err = 0;
2648 
2649 	BUG_ON(pud_huge(*pud));
2650 
2651 	if (create) {
2652 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2653 		if (!pmd)
2654 			return -ENOMEM;
2655 	} else {
2656 		pmd = pmd_offset(pud, addr);
2657 	}
2658 	do {
2659 		next = pmd_addr_end(addr, end);
2660 		if (pmd_none(*pmd) && !create)
2661 			continue;
2662 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2663 			return -EINVAL;
2664 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2665 			if (!create)
2666 				continue;
2667 			pmd_clear_bad(pmd);
2668 		}
2669 		err = apply_to_pte_range(mm, pmd, addr, next,
2670 					 fn, data, create, mask);
2671 		if (err)
2672 			break;
2673 	} while (pmd++, addr = next, addr != end);
2674 
2675 	return err;
2676 }
2677 
2678 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2679 				     unsigned long addr, unsigned long end,
2680 				     pte_fn_t fn, void *data, bool create,
2681 				     pgtbl_mod_mask *mask)
2682 {
2683 	pud_t *pud;
2684 	unsigned long next;
2685 	int err = 0;
2686 
2687 	if (create) {
2688 		pud = pud_alloc_track(mm, p4d, addr, mask);
2689 		if (!pud)
2690 			return -ENOMEM;
2691 	} else {
2692 		pud = pud_offset(p4d, addr);
2693 	}
2694 	do {
2695 		next = pud_addr_end(addr, end);
2696 		if (pud_none(*pud) && !create)
2697 			continue;
2698 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2699 			return -EINVAL;
2700 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2701 			if (!create)
2702 				continue;
2703 			pud_clear_bad(pud);
2704 		}
2705 		err = apply_to_pmd_range(mm, pud, addr, next,
2706 					 fn, data, create, mask);
2707 		if (err)
2708 			break;
2709 	} while (pud++, addr = next, addr != end);
2710 
2711 	return err;
2712 }
2713 
2714 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2715 				     unsigned long addr, unsigned long end,
2716 				     pte_fn_t fn, void *data, bool create,
2717 				     pgtbl_mod_mask *mask)
2718 {
2719 	p4d_t *p4d;
2720 	unsigned long next;
2721 	int err = 0;
2722 
2723 	if (create) {
2724 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2725 		if (!p4d)
2726 			return -ENOMEM;
2727 	} else {
2728 		p4d = p4d_offset(pgd, addr);
2729 	}
2730 	do {
2731 		next = p4d_addr_end(addr, end);
2732 		if (p4d_none(*p4d) && !create)
2733 			continue;
2734 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2735 			return -EINVAL;
2736 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2737 			if (!create)
2738 				continue;
2739 			p4d_clear_bad(p4d);
2740 		}
2741 		err = apply_to_pud_range(mm, p4d, addr, next,
2742 					 fn, data, create, mask);
2743 		if (err)
2744 			break;
2745 	} while (p4d++, addr = next, addr != end);
2746 
2747 	return err;
2748 }
2749 
2750 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2751 				 unsigned long size, pte_fn_t fn,
2752 				 void *data, bool create)
2753 {
2754 	pgd_t *pgd;
2755 	unsigned long start = addr, next;
2756 	unsigned long end = addr + size;
2757 	pgtbl_mod_mask mask = 0;
2758 	int err = 0;
2759 
2760 	if (WARN_ON(addr >= end))
2761 		return -EINVAL;
2762 
2763 	pgd = pgd_offset(mm, addr);
2764 	do {
2765 		next = pgd_addr_end(addr, end);
2766 		if (pgd_none(*pgd) && !create)
2767 			continue;
2768 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2769 			return -EINVAL;
2770 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2771 			if (!create)
2772 				continue;
2773 			pgd_clear_bad(pgd);
2774 		}
2775 		err = apply_to_p4d_range(mm, pgd, addr, next,
2776 					 fn, data, create, &mask);
2777 		if (err)
2778 			break;
2779 	} while (pgd++, addr = next, addr != end);
2780 
2781 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2782 		arch_sync_kernel_mappings(start, start + size);
2783 
2784 	return err;
2785 }
2786 
2787 /*
2788  * Scan a region of virtual memory, filling in page tables as necessary
2789  * and calling a provided function on each leaf page table.
2790  */
2791 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2792 			unsigned long size, pte_fn_t fn, void *data)
2793 {
2794 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2795 }
2796 EXPORT_SYMBOL_GPL(apply_to_page_range);
2797 
2798 /*
2799  * Scan a region of virtual memory, calling a provided function on
2800  * each leaf page table where it exists.
2801  *
2802  * Unlike apply_to_page_range, this does _not_ fill in page tables
2803  * where they are absent.
2804  */
2805 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2806 				 unsigned long size, pte_fn_t fn, void *data)
2807 {
2808 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2809 }
2810 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
2811 
2812 /*
2813  * handle_pte_fault chooses page fault handler according to an entry which was
2814  * read non-atomically.  Before making any commitment, on those architectures
2815  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2816  * parts, do_swap_page must check under lock before unmapping the pte and
2817  * proceeding (but do_wp_page is only called after already making such a check;
2818  * and do_anonymous_page can safely check later on).
2819  */
2820 static inline int pte_unmap_same(struct vm_fault *vmf)
2821 {
2822 	int same = 1;
2823 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2824 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2825 		spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2826 		spin_lock(ptl);
2827 		same = pte_same(*vmf->pte, vmf->orig_pte);
2828 		spin_unlock(ptl);
2829 	}
2830 #endif
2831 	pte_unmap(vmf->pte);
2832 	vmf->pte = NULL;
2833 	return same;
2834 }
2835 
2836 static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
2837 				       struct vm_fault *vmf)
2838 {
2839 	bool ret;
2840 	void *kaddr;
2841 	void __user *uaddr;
2842 	bool locked = false;
2843 	struct vm_area_struct *vma = vmf->vma;
2844 	struct mm_struct *mm = vma->vm_mm;
2845 	unsigned long addr = vmf->address;
2846 
2847 	if (likely(src)) {
2848 		copy_user_highpage(dst, src, addr, vma);
2849 		return true;
2850 	}
2851 
2852 	/*
2853 	 * If the source page was a PFN mapping, we don't have
2854 	 * a "struct page" for it. We do a best-effort copy by
2855 	 * just copying from the original user address. If that
2856 	 * fails, we just zero-fill it. Live with it.
2857 	 */
2858 	kaddr = kmap_atomic(dst);
2859 	uaddr = (void __user *)(addr & PAGE_MASK);
2860 
2861 	/*
2862 	 * On architectures with software "accessed" bits, we would
2863 	 * take a double page fault, so mark it accessed here.
2864 	 */
2865 	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2866 		pte_t entry;
2867 
2868 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2869 		locked = true;
2870 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2871 			/*
2872 			 * Other thread has already handled the fault
2873 			 * and update local tlb only
2874 			 */
2875 			update_mmu_tlb(vma, addr, vmf->pte);
2876 			ret = false;
2877 			goto pte_unlock;
2878 		}
2879 
2880 		entry = pte_mkyoung(vmf->orig_pte);
2881 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2882 			update_mmu_cache(vma, addr, vmf->pte);
2883 	}
2884 
2885 	/*
2886 	 * This really shouldn't fail, because the page is there
2887 	 * in the page tables. But it might just be unreadable,
2888 	 * in which case we just give up and fill the result with
2889 	 * zeroes.
2890 	 */
2891 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2892 		if (locked)
2893 			goto warn;
2894 
2895 		/* Re-validate under PTL if the page is still mapped */
2896 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2897 		locked = true;
2898 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2899 			/* The PTE changed under us, update local tlb */
2900 			update_mmu_tlb(vma, addr, vmf->pte);
2901 			ret = false;
2902 			goto pte_unlock;
2903 		}
2904 
2905 		/*
2906 		 * The same page can be mapped back since last copy attempt.
2907 		 * Try to copy again under PTL.
2908 		 */
2909 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2910 			/*
2911 			 * Give a warn in case there can be some obscure
2912 			 * use-case
2913 			 */
2914 warn:
2915 			WARN_ON_ONCE(1);
2916 			clear_page(kaddr);
2917 		}
2918 	}
2919 
2920 	ret = true;
2921 
2922 pte_unlock:
2923 	if (locked)
2924 		pte_unmap_unlock(vmf->pte, vmf->ptl);
2925 	kunmap_atomic(kaddr);
2926 	flush_dcache_page(dst);
2927 
2928 	return ret;
2929 }
2930 
2931 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2932 {
2933 	struct file *vm_file = vma->vm_file;
2934 
2935 	if (vm_file)
2936 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2937 
2938 	/*
2939 	 * Special mappings (e.g. VDSO) do not have any file so fake
2940 	 * a default GFP_KERNEL for them.
2941 	 */
2942 	return GFP_KERNEL;
2943 }
2944 
2945 /*
2946  * Notify the address space that the page is about to become writable so that
2947  * it can prohibit this or wait for the page to get into an appropriate state.
2948  *
2949  * We do this without the lock held, so that it can sleep if it needs to.
2950  */
2951 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2952 {
2953 	vm_fault_t ret;
2954 	struct page *page = vmf->page;
2955 	unsigned int old_flags = vmf->flags;
2956 
2957 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2958 
2959 	if (vmf->vma->vm_file &&
2960 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2961 		return VM_FAULT_SIGBUS;
2962 
2963 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2964 	/* Restore original flags so that caller is not surprised */
2965 	vmf->flags = old_flags;
2966 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2967 		return ret;
2968 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2969 		lock_page(page);
2970 		if (!page->mapping) {
2971 			unlock_page(page);
2972 			return 0; /* retry */
2973 		}
2974 		ret |= VM_FAULT_LOCKED;
2975 	} else
2976 		VM_BUG_ON_PAGE(!PageLocked(page), page);
2977 	return ret;
2978 }
2979 
2980 /*
2981  * Handle dirtying of a page in shared file mapping on a write fault.
2982  *
2983  * The function expects the page to be locked and unlocks it.
2984  */
2985 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2986 {
2987 	struct vm_area_struct *vma = vmf->vma;
2988 	struct address_space *mapping;
2989 	struct page *page = vmf->page;
2990 	bool dirtied;
2991 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2992 
2993 	dirtied = set_page_dirty(page);
2994 	VM_BUG_ON_PAGE(PageAnon(page), page);
2995 	/*
2996 	 * Take a local copy of the address_space - page.mapping may be zeroed
2997 	 * by truncate after unlock_page().   The address_space itself remains
2998 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2999 	 * release semantics to prevent the compiler from undoing this copying.
3000 	 */
3001 	mapping = page_rmapping(page);
3002 	unlock_page(page);
3003 
3004 	if (!page_mkwrite)
3005 		file_update_time(vma->vm_file);
3006 
3007 	/*
3008 	 * Throttle page dirtying rate down to writeback speed.
3009 	 *
3010 	 * mapping may be NULL here because some device drivers do not
3011 	 * set page.mapping but still dirty their pages
3012 	 *
3013 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3014 	 * is pinning the mapping, as per above.
3015 	 */
3016 	if ((dirtied || page_mkwrite) && mapping) {
3017 		struct file *fpin;
3018 
3019 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3020 		balance_dirty_pages_ratelimited(mapping);
3021 		if (fpin) {
3022 			fput(fpin);
3023 			return VM_FAULT_RETRY;
3024 		}
3025 	}
3026 
3027 	return 0;
3028 }
3029 
3030 /*
3031  * Handle write page faults for pages that can be reused in the current vma
3032  *
3033  * This can happen either due to the mapping being with the VM_SHARED flag,
3034  * or due to us being the last reference standing to the page. In either
3035  * case, all we need to do here is to mark the page as writable and update
3036  * any related book-keeping.
3037  */
3038 static inline void wp_page_reuse(struct vm_fault *vmf)
3039 	__releases(vmf->ptl)
3040 {
3041 	struct vm_area_struct *vma = vmf->vma;
3042 	struct page *page = vmf->page;
3043 	pte_t entry;
3044 
3045 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3046 	VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page));
3047 
3048 	/*
3049 	 * Clear the pages cpupid information as the existing
3050 	 * information potentially belongs to a now completely
3051 	 * unrelated process.
3052 	 */
3053 	if (page)
3054 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3055 
3056 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3057 	entry = pte_mkyoung(vmf->orig_pte);
3058 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3059 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3060 		update_mmu_cache(vma, vmf->address, vmf->pte);
3061 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3062 	count_vm_event(PGREUSE);
3063 }
3064 
3065 /*
3066  * Handle the case of a page which we actually need to copy to a new page,
3067  * either due to COW or unsharing.
3068  *
3069  * Called with mmap_lock locked and the old page referenced, but
3070  * without the ptl held.
3071  *
3072  * High level logic flow:
3073  *
3074  * - Allocate a page, copy the content of the old page to the new one.
3075  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3076  * - Take the PTL. If the pte changed, bail out and release the allocated page
3077  * - If the pte is still the way we remember it, update the page table and all
3078  *   relevant references. This includes dropping the reference the page-table
3079  *   held to the old page, as well as updating the rmap.
3080  * - In any case, unlock the PTL and drop the reference we took to the old page.
3081  */
3082 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3083 {
3084 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3085 	struct vm_area_struct *vma = vmf->vma;
3086 	struct mm_struct *mm = vma->vm_mm;
3087 	struct page *old_page = vmf->page;
3088 	struct page *new_page = NULL;
3089 	pte_t entry;
3090 	int page_copied = 0;
3091 	struct mmu_notifier_range range;
3092 
3093 	if (unlikely(anon_vma_prepare(vma)))
3094 		goto oom;
3095 
3096 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3097 		new_page = alloc_zeroed_user_highpage_movable(vma,
3098 							      vmf->address);
3099 		if (!new_page)
3100 			goto oom;
3101 	} else {
3102 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3103 				vmf->address);
3104 		if (!new_page)
3105 			goto oom;
3106 
3107 		if (!__wp_page_copy_user(new_page, old_page, vmf)) {
3108 			/*
3109 			 * COW failed, if the fault was solved by other,
3110 			 * it's fine. If not, userspace would re-fault on
3111 			 * the same address and we will handle the fault
3112 			 * from the second attempt.
3113 			 */
3114 			put_page(new_page);
3115 			if (old_page)
3116 				put_page(old_page);
3117 			return 0;
3118 		}
3119 	}
3120 
3121 	if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
3122 		goto oom_free_new;
3123 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3124 
3125 	__SetPageUptodate(new_page);
3126 
3127 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3128 				vmf->address & PAGE_MASK,
3129 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3130 	mmu_notifier_invalidate_range_start(&range);
3131 
3132 	/*
3133 	 * Re-check the pte - we dropped the lock
3134 	 */
3135 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3136 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3137 		if (old_page) {
3138 			if (!PageAnon(old_page)) {
3139 				dec_mm_counter_fast(mm,
3140 						mm_counter_file(old_page));
3141 				inc_mm_counter_fast(mm, MM_ANONPAGES);
3142 			}
3143 		} else {
3144 			inc_mm_counter_fast(mm, MM_ANONPAGES);
3145 		}
3146 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3147 		entry = mk_pte(new_page, vma->vm_page_prot);
3148 		entry = pte_sw_mkyoung(entry);
3149 		if (unlikely(unshare)) {
3150 			if (pte_soft_dirty(vmf->orig_pte))
3151 				entry = pte_mksoft_dirty(entry);
3152 			if (pte_uffd_wp(vmf->orig_pte))
3153 				entry = pte_mkuffd_wp(entry);
3154 		} else {
3155 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3156 		}
3157 
3158 		/*
3159 		 * Clear the pte entry and flush it first, before updating the
3160 		 * pte with the new entry, to keep TLBs on different CPUs in
3161 		 * sync. This code used to set the new PTE then flush TLBs, but
3162 		 * that left a window where the new PTE could be loaded into
3163 		 * some TLBs while the old PTE remains in others.
3164 		 */
3165 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3166 		page_add_new_anon_rmap(new_page, vma, vmf->address);
3167 		lru_cache_add_inactive_or_unevictable(new_page, vma);
3168 		/*
3169 		 * We call the notify macro here because, when using secondary
3170 		 * mmu page tables (such as kvm shadow page tables), we want the
3171 		 * new page to be mapped directly into the secondary page table.
3172 		 */
3173 		BUG_ON(unshare && pte_write(entry));
3174 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3175 		update_mmu_cache(vma, vmf->address, vmf->pte);
3176 		if (old_page) {
3177 			/*
3178 			 * Only after switching the pte to the new page may
3179 			 * we remove the mapcount here. Otherwise another
3180 			 * process may come and find the rmap count decremented
3181 			 * before the pte is switched to the new page, and
3182 			 * "reuse" the old page writing into it while our pte
3183 			 * here still points into it and can be read by other
3184 			 * threads.
3185 			 *
3186 			 * The critical issue is to order this
3187 			 * page_remove_rmap with the ptp_clear_flush above.
3188 			 * Those stores are ordered by (if nothing else,)
3189 			 * the barrier present in the atomic_add_negative
3190 			 * in page_remove_rmap.
3191 			 *
3192 			 * Then the TLB flush in ptep_clear_flush ensures that
3193 			 * no process can access the old page before the
3194 			 * decremented mapcount is visible. And the old page
3195 			 * cannot be reused until after the decremented
3196 			 * mapcount is visible. So transitively, TLBs to
3197 			 * old page will be flushed before it can be reused.
3198 			 */
3199 			page_remove_rmap(old_page, vma, false);
3200 		}
3201 
3202 		/* Free the old page.. */
3203 		new_page = old_page;
3204 		page_copied = 1;
3205 	} else {
3206 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3207 	}
3208 
3209 	if (new_page)
3210 		put_page(new_page);
3211 
3212 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3213 	/*
3214 	 * No need to double call mmu_notifier->invalidate_range() callback as
3215 	 * the above ptep_clear_flush_notify() did already call it.
3216 	 */
3217 	mmu_notifier_invalidate_range_only_end(&range);
3218 	if (old_page) {
3219 		if (page_copied)
3220 			free_swap_cache(old_page);
3221 		put_page(old_page);
3222 	}
3223 	return (page_copied && !unshare) ? VM_FAULT_WRITE : 0;
3224 oom_free_new:
3225 	put_page(new_page);
3226 oom:
3227 	if (old_page)
3228 		put_page(old_page);
3229 	return VM_FAULT_OOM;
3230 }
3231 
3232 /**
3233  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3234  *			  writeable once the page is prepared
3235  *
3236  * @vmf: structure describing the fault
3237  *
3238  * This function handles all that is needed to finish a write page fault in a
3239  * shared mapping due to PTE being read-only once the mapped page is prepared.
3240  * It handles locking of PTE and modifying it.
3241  *
3242  * The function expects the page to be locked or other protection against
3243  * concurrent faults / writeback (such as DAX radix tree locks).
3244  *
3245  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3246  * we acquired PTE lock.
3247  */
3248 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3249 {
3250 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3251 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3252 				       &vmf->ptl);
3253 	/*
3254 	 * We might have raced with another page fault while we released the
3255 	 * pte_offset_map_lock.
3256 	 */
3257 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3258 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3259 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3260 		return VM_FAULT_NOPAGE;
3261 	}
3262 	wp_page_reuse(vmf);
3263 	return 0;
3264 }
3265 
3266 /*
3267  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3268  * mapping
3269  */
3270 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3271 {
3272 	struct vm_area_struct *vma = vmf->vma;
3273 
3274 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3275 		vm_fault_t ret;
3276 
3277 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3278 		vmf->flags |= FAULT_FLAG_MKWRITE;
3279 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3280 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3281 			return ret;
3282 		return finish_mkwrite_fault(vmf);
3283 	}
3284 	wp_page_reuse(vmf);
3285 	return VM_FAULT_WRITE;
3286 }
3287 
3288 static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3289 	__releases(vmf->ptl)
3290 {
3291 	struct vm_area_struct *vma = vmf->vma;
3292 	vm_fault_t ret = VM_FAULT_WRITE;
3293 
3294 	get_page(vmf->page);
3295 
3296 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3297 		vm_fault_t tmp;
3298 
3299 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3300 		tmp = do_page_mkwrite(vmf);
3301 		if (unlikely(!tmp || (tmp &
3302 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3303 			put_page(vmf->page);
3304 			return tmp;
3305 		}
3306 		tmp = finish_mkwrite_fault(vmf);
3307 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3308 			unlock_page(vmf->page);
3309 			put_page(vmf->page);
3310 			return tmp;
3311 		}
3312 	} else {
3313 		wp_page_reuse(vmf);
3314 		lock_page(vmf->page);
3315 	}
3316 	ret |= fault_dirty_shared_page(vmf);
3317 	put_page(vmf->page);
3318 
3319 	return ret;
3320 }
3321 
3322 /*
3323  * This routine handles present pages, when
3324  * * users try to write to a shared page (FAULT_FLAG_WRITE)
3325  * * GUP wants to take a R/O pin on a possibly shared anonymous page
3326  *   (FAULT_FLAG_UNSHARE)
3327  *
3328  * It is done by copying the page to a new address and decrementing the
3329  * shared-page counter for the old page.
3330  *
3331  * Note that this routine assumes that the protection checks have been
3332  * done by the caller (the low-level page fault routine in most cases).
3333  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3334  * done any necessary COW.
3335  *
3336  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3337  * though the page will change only once the write actually happens. This
3338  * avoids a few races, and potentially makes it more efficient.
3339  *
3340  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3341  * but allow concurrent faults), with pte both mapped and locked.
3342  * We return with mmap_lock still held, but pte unmapped and unlocked.
3343  */
3344 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3345 	__releases(vmf->ptl)
3346 {
3347 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3348 	struct vm_area_struct *vma = vmf->vma;
3349 
3350 	VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
3351 	VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
3352 
3353 	if (likely(!unshare)) {
3354 		if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3355 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3356 			return handle_userfault(vmf, VM_UFFD_WP);
3357 		}
3358 
3359 		/*
3360 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3361 		 * is flushed in this case before copying.
3362 		 */
3363 		if (unlikely(userfaultfd_wp(vmf->vma) &&
3364 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3365 			flush_tlb_page(vmf->vma, vmf->address);
3366 	}
3367 
3368 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3369 	if (!vmf->page) {
3370 		if (unlikely(unshare)) {
3371 			/* No anonymous page -> nothing to do. */
3372 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3373 			return 0;
3374 		}
3375 
3376 		/*
3377 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3378 		 * VM_PFNMAP VMA.
3379 		 *
3380 		 * We should not cow pages in a shared writeable mapping.
3381 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3382 		 */
3383 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3384 				     (VM_WRITE|VM_SHARED))
3385 			return wp_pfn_shared(vmf);
3386 
3387 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3388 		return wp_page_copy(vmf);
3389 	}
3390 
3391 	/*
3392 	 * Take out anonymous pages first, anonymous shared vmas are
3393 	 * not dirty accountable.
3394 	 */
3395 	if (PageAnon(vmf->page)) {
3396 		struct page *page = vmf->page;
3397 
3398 		/*
3399 		 * If the page is exclusive to this process we must reuse the
3400 		 * page without further checks.
3401 		 */
3402 		if (PageAnonExclusive(page))
3403 			goto reuse;
3404 
3405 		/*
3406 		 * We have to verify under page lock: these early checks are
3407 		 * just an optimization to avoid locking the page and freeing
3408 		 * the swapcache if there is little hope that we can reuse.
3409 		 *
3410 		 * PageKsm() doesn't necessarily raise the page refcount.
3411 		 */
3412 		if (PageKsm(page) || page_count(page) > 3)
3413 			goto copy;
3414 		if (!PageLRU(page))
3415 			/*
3416 			 * Note: We cannot easily detect+handle references from
3417 			 * remote LRU pagevecs or references to PageLRU() pages.
3418 			 */
3419 			lru_add_drain();
3420 		if (page_count(page) > 1 + PageSwapCache(page))
3421 			goto copy;
3422 		if (!trylock_page(page))
3423 			goto copy;
3424 		if (PageSwapCache(page))
3425 			try_to_free_swap(page);
3426 		if (PageKsm(page) || page_count(page) != 1) {
3427 			unlock_page(page);
3428 			goto copy;
3429 		}
3430 		/*
3431 		 * Ok, we've got the only page reference from our mapping
3432 		 * and the page is locked, it's dark out, and we're wearing
3433 		 * sunglasses. Hit it.
3434 		 */
3435 		page_move_anon_rmap(page, vma);
3436 		unlock_page(page);
3437 reuse:
3438 		if (unlikely(unshare)) {
3439 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3440 			return 0;
3441 		}
3442 		wp_page_reuse(vmf);
3443 		return VM_FAULT_WRITE;
3444 	} else if (unshare) {
3445 		/* No anonymous page -> nothing to do. */
3446 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3447 		return 0;
3448 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3449 					(VM_WRITE|VM_SHARED))) {
3450 		return wp_page_shared(vmf);
3451 	}
3452 copy:
3453 	/*
3454 	 * Ok, we need to copy. Oh, well..
3455 	 */
3456 	get_page(vmf->page);
3457 
3458 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3459 #ifdef CONFIG_KSM
3460 	if (PageKsm(vmf->page))
3461 		count_vm_event(COW_KSM);
3462 #endif
3463 	return wp_page_copy(vmf);
3464 }
3465 
3466 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3467 		unsigned long start_addr, unsigned long end_addr,
3468 		struct zap_details *details)
3469 {
3470 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3471 }
3472 
3473 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3474 					    pgoff_t first_index,
3475 					    pgoff_t last_index,
3476 					    struct zap_details *details)
3477 {
3478 	struct vm_area_struct *vma;
3479 	pgoff_t vba, vea, zba, zea;
3480 
3481 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
3482 		vba = vma->vm_pgoff;
3483 		vea = vba + vma_pages(vma) - 1;
3484 		zba = max(first_index, vba);
3485 		zea = min(last_index, vea);
3486 
3487 		unmap_mapping_range_vma(vma,
3488 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3489 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3490 				details);
3491 	}
3492 }
3493 
3494 /**
3495  * unmap_mapping_folio() - Unmap single folio from processes.
3496  * @folio: The locked folio to be unmapped.
3497  *
3498  * Unmap this folio from any userspace process which still has it mmaped.
3499  * Typically, for efficiency, the range of nearby pages has already been
3500  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3501  * truncation or invalidation holds the lock on a folio, it may find that
3502  * the page has been remapped again: and then uses unmap_mapping_folio()
3503  * to unmap it finally.
3504  */
3505 void unmap_mapping_folio(struct folio *folio)
3506 {
3507 	struct address_space *mapping = folio->mapping;
3508 	struct zap_details details = { };
3509 	pgoff_t	first_index;
3510 	pgoff_t	last_index;
3511 
3512 	VM_BUG_ON(!folio_test_locked(folio));
3513 
3514 	first_index = folio->index;
3515 	last_index = folio->index + folio_nr_pages(folio) - 1;
3516 
3517 	details.even_cows = false;
3518 	details.single_folio = folio;
3519 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
3520 
3521 	i_mmap_lock_read(mapping);
3522 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3523 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3524 					 last_index, &details);
3525 	i_mmap_unlock_read(mapping);
3526 }
3527 
3528 /**
3529  * unmap_mapping_pages() - Unmap pages from processes.
3530  * @mapping: The address space containing pages to be unmapped.
3531  * @start: Index of first page to be unmapped.
3532  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3533  * @even_cows: Whether to unmap even private COWed pages.
3534  *
3535  * Unmap the pages in this address space from any userspace process which
3536  * has them mmaped.  Generally, you want to remove COWed pages as well when
3537  * a file is being truncated, but not when invalidating pages from the page
3538  * cache.
3539  */
3540 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3541 		pgoff_t nr, bool even_cows)
3542 {
3543 	struct zap_details details = { };
3544 	pgoff_t	first_index = start;
3545 	pgoff_t	last_index = start + nr - 1;
3546 
3547 	details.even_cows = even_cows;
3548 	if (last_index < first_index)
3549 		last_index = ULONG_MAX;
3550 
3551 	i_mmap_lock_read(mapping);
3552 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3553 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3554 					 last_index, &details);
3555 	i_mmap_unlock_read(mapping);
3556 }
3557 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3558 
3559 /**
3560  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3561  * address_space corresponding to the specified byte range in the underlying
3562  * file.
3563  *
3564  * @mapping: the address space containing mmaps to be unmapped.
3565  * @holebegin: byte in first page to unmap, relative to the start of
3566  * the underlying file.  This will be rounded down to a PAGE_SIZE
3567  * boundary.  Note that this is different from truncate_pagecache(), which
3568  * must keep the partial page.  In contrast, we must get rid of
3569  * partial pages.
3570  * @holelen: size of prospective hole in bytes.  This will be rounded
3571  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3572  * end of the file.
3573  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3574  * but 0 when invalidating pagecache, don't throw away private data.
3575  */
3576 void unmap_mapping_range(struct address_space *mapping,
3577 		loff_t const holebegin, loff_t const holelen, int even_cows)
3578 {
3579 	pgoff_t hba = holebegin >> PAGE_SHIFT;
3580 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3581 
3582 	/* Check for overflow. */
3583 	if (sizeof(holelen) > sizeof(hlen)) {
3584 		long long holeend =
3585 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3586 		if (holeend & ~(long long)ULONG_MAX)
3587 			hlen = ULONG_MAX - hba + 1;
3588 	}
3589 
3590 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3591 }
3592 EXPORT_SYMBOL(unmap_mapping_range);
3593 
3594 /*
3595  * Restore a potential device exclusive pte to a working pte entry
3596  */
3597 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3598 {
3599 	struct page *page = vmf->page;
3600 	struct vm_area_struct *vma = vmf->vma;
3601 	struct mmu_notifier_range range;
3602 
3603 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
3604 		return VM_FAULT_RETRY;
3605 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3606 				vma->vm_mm, vmf->address & PAGE_MASK,
3607 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3608 	mmu_notifier_invalidate_range_start(&range);
3609 
3610 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3611 				&vmf->ptl);
3612 	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3613 		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
3614 
3615 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3616 	unlock_page(page);
3617 
3618 	mmu_notifier_invalidate_range_end(&range);
3619 	return 0;
3620 }
3621 
3622 static inline bool should_try_to_free_swap(struct page *page,
3623 					   struct vm_area_struct *vma,
3624 					   unsigned int fault_flags)
3625 {
3626 	if (!PageSwapCache(page))
3627 		return false;
3628 	if (mem_cgroup_swap_full(page) || (vma->vm_flags & VM_LOCKED) ||
3629 	    PageMlocked(page))
3630 		return true;
3631 	/*
3632 	 * If we want to map a page that's in the swapcache writable, we
3633 	 * have to detect via the refcount if we're really the exclusive
3634 	 * user. Try freeing the swapcache to get rid of the swapcache
3635 	 * reference only in case it's likely that we'll be the exlusive user.
3636 	 */
3637 	return (fault_flags & FAULT_FLAG_WRITE) && !PageKsm(page) &&
3638 		page_count(page) == 2;
3639 }
3640 
3641 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3642 {
3643 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3644 				       vmf->address, &vmf->ptl);
3645 	/*
3646 	 * Be careful so that we will only recover a special uffd-wp pte into a
3647 	 * none pte.  Otherwise it means the pte could have changed, so retry.
3648 	 */
3649 	if (is_pte_marker(*vmf->pte))
3650 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3651 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3652 	return 0;
3653 }
3654 
3655 /*
3656  * This is actually a page-missing access, but with uffd-wp special pte
3657  * installed.  It means this pte was wr-protected before being unmapped.
3658  */
3659 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3660 {
3661 	/*
3662 	 * Just in case there're leftover special ptes even after the region
3663 	 * got unregistered - we can simply clear them.  We can also do that
3664 	 * proactively when e.g. when we do UFFDIO_UNREGISTER upon some uffd-wp
3665 	 * ranges, but it should be more efficient to be done lazily here.
3666 	 */
3667 	if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma)))
3668 		return pte_marker_clear(vmf);
3669 
3670 	/* do_fault() can handle pte markers too like none pte */
3671 	return do_fault(vmf);
3672 }
3673 
3674 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3675 {
3676 	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3677 	unsigned long marker = pte_marker_get(entry);
3678 
3679 	/*
3680 	 * PTE markers should always be with file-backed memories, and the
3681 	 * marker should never be empty.  If anything weird happened, the best
3682 	 * thing to do is to kill the process along with its mm.
3683 	 */
3684 	if (WARN_ON_ONCE(vma_is_anonymous(vmf->vma) || !marker))
3685 		return VM_FAULT_SIGBUS;
3686 
3687 	if (pte_marker_entry_uffd_wp(entry))
3688 		return pte_marker_handle_uffd_wp(vmf);
3689 
3690 	/* This is an unknown pte marker */
3691 	return VM_FAULT_SIGBUS;
3692 }
3693 
3694 /*
3695  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3696  * but allow concurrent faults), and pte mapped but not yet locked.
3697  * We return with pte unmapped and unlocked.
3698  *
3699  * We return with the mmap_lock locked or unlocked in the same cases
3700  * as does filemap_fault().
3701  */
3702 vm_fault_t do_swap_page(struct vm_fault *vmf)
3703 {
3704 	struct vm_area_struct *vma = vmf->vma;
3705 	struct page *page = NULL, *swapcache;
3706 	struct swap_info_struct *si = NULL;
3707 	rmap_t rmap_flags = RMAP_NONE;
3708 	bool exclusive = false;
3709 	swp_entry_t entry;
3710 	pte_t pte;
3711 	int locked;
3712 	vm_fault_t ret = 0;
3713 	void *shadow = NULL;
3714 
3715 	if (!pte_unmap_same(vmf))
3716 		goto out;
3717 
3718 	entry = pte_to_swp_entry(vmf->orig_pte);
3719 	if (unlikely(non_swap_entry(entry))) {
3720 		if (is_migration_entry(entry)) {
3721 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3722 					     vmf->address);
3723 		} else if (is_device_exclusive_entry(entry)) {
3724 			vmf->page = pfn_swap_entry_to_page(entry);
3725 			ret = remove_device_exclusive_entry(vmf);
3726 		} else if (is_device_private_entry(entry)) {
3727 			vmf->page = pfn_swap_entry_to_page(entry);
3728 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3729 		} else if (is_hwpoison_entry(entry)) {
3730 			ret = VM_FAULT_HWPOISON;
3731 		} else if (is_swapin_error_entry(entry)) {
3732 			ret = VM_FAULT_SIGBUS;
3733 		} else if (is_pte_marker_entry(entry)) {
3734 			ret = handle_pte_marker(vmf);
3735 		} else {
3736 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3737 			ret = VM_FAULT_SIGBUS;
3738 		}
3739 		goto out;
3740 	}
3741 
3742 	/* Prevent swapoff from happening to us. */
3743 	si = get_swap_device(entry);
3744 	if (unlikely(!si))
3745 		goto out;
3746 
3747 	page = lookup_swap_cache(entry, vma, vmf->address);
3748 	swapcache = page;
3749 
3750 	if (!page) {
3751 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3752 		    __swap_count(entry) == 1) {
3753 			/* skip swapcache */
3754 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3755 							vmf->address);
3756 			if (page) {
3757 				__SetPageLocked(page);
3758 				__SetPageSwapBacked(page);
3759 
3760 				if (mem_cgroup_swapin_charge_page(page,
3761 					vma->vm_mm, GFP_KERNEL, entry)) {
3762 					ret = VM_FAULT_OOM;
3763 					goto out_page;
3764 				}
3765 				mem_cgroup_swapin_uncharge_swap(entry);
3766 
3767 				shadow = get_shadow_from_swap_cache(entry);
3768 				if (shadow)
3769 					workingset_refault(page_folio(page),
3770 								shadow);
3771 
3772 				lru_cache_add(page);
3773 
3774 				/* To provide entry to swap_readpage() */
3775 				set_page_private(page, entry.val);
3776 				swap_readpage(page, true, NULL);
3777 				set_page_private(page, 0);
3778 			}
3779 		} else {
3780 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3781 						vmf);
3782 			swapcache = page;
3783 		}
3784 
3785 		if (!page) {
3786 			/*
3787 			 * Back out if somebody else faulted in this pte
3788 			 * while we released the pte lock.
3789 			 */
3790 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3791 					vmf->address, &vmf->ptl);
3792 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3793 				ret = VM_FAULT_OOM;
3794 			goto unlock;
3795 		}
3796 
3797 		/* Had to read the page from swap area: Major fault */
3798 		ret = VM_FAULT_MAJOR;
3799 		count_vm_event(PGMAJFAULT);
3800 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3801 	} else if (PageHWPoison(page)) {
3802 		/*
3803 		 * hwpoisoned dirty swapcache pages are kept for killing
3804 		 * owner processes (which may be unknown at hwpoison time)
3805 		 */
3806 		ret = VM_FAULT_HWPOISON;
3807 		goto out_release;
3808 	}
3809 
3810 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3811 
3812 	if (!locked) {
3813 		ret |= VM_FAULT_RETRY;
3814 		goto out_release;
3815 	}
3816 
3817 	if (swapcache) {
3818 		/*
3819 		 * Make sure try_to_free_swap or swapoff did not release the
3820 		 * swapcache from under us.  The page pin, and pte_same test
3821 		 * below, are not enough to exclude that.  Even if it is still
3822 		 * swapcache, we need to check that the page's swap has not
3823 		 * changed.
3824 		 */
3825 		if (unlikely(!PageSwapCache(page) ||
3826 			     page_private(page) != entry.val))
3827 			goto out_page;
3828 
3829 		/*
3830 		 * KSM sometimes has to copy on read faults, for example, if
3831 		 * page->index of !PageKSM() pages would be nonlinear inside the
3832 		 * anon VMA -- PageKSM() is lost on actual swapout.
3833 		 */
3834 		page = ksm_might_need_to_copy(page, vma, vmf->address);
3835 		if (unlikely(!page)) {
3836 			ret = VM_FAULT_OOM;
3837 			page = swapcache;
3838 			goto out_page;
3839 		}
3840 
3841 		/*
3842 		 * If we want to map a page that's in the swapcache writable, we
3843 		 * have to detect via the refcount if we're really the exclusive
3844 		 * owner. Try removing the extra reference from the local LRU
3845 		 * pagevecs if required.
3846 		 */
3847 		if ((vmf->flags & FAULT_FLAG_WRITE) && page == swapcache &&
3848 		    !PageKsm(page) && !PageLRU(page))
3849 			lru_add_drain();
3850 	}
3851 
3852 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3853 
3854 	/*
3855 	 * Back out if somebody else already faulted in this pte.
3856 	 */
3857 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3858 			&vmf->ptl);
3859 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3860 		goto out_nomap;
3861 
3862 	if (unlikely(!PageUptodate(page))) {
3863 		ret = VM_FAULT_SIGBUS;
3864 		goto out_nomap;
3865 	}
3866 
3867 	/*
3868 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
3869 	 * must never point at an anonymous page in the swapcache that is
3870 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
3871 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
3872 	 * check after taking the PT lock and making sure that nobody
3873 	 * concurrently faulted in this page and set PG_anon_exclusive.
3874 	 */
3875 	BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
3876 	BUG_ON(PageAnon(page) && PageAnonExclusive(page));
3877 
3878 	/*
3879 	 * Check under PT lock (to protect against concurrent fork() sharing
3880 	 * the swap entry concurrently) for certainly exclusive pages.
3881 	 */
3882 	if (!PageKsm(page)) {
3883 		/*
3884 		 * Note that pte_swp_exclusive() == false for architectures
3885 		 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
3886 		 */
3887 		exclusive = pte_swp_exclusive(vmf->orig_pte);
3888 		if (page != swapcache) {
3889 			/*
3890 			 * We have a fresh page that is not exposed to the
3891 			 * swapcache -> certainly exclusive.
3892 			 */
3893 			exclusive = true;
3894 		} else if (exclusive && PageWriteback(page) &&
3895 			  data_race(si->flags & SWP_STABLE_WRITES)) {
3896 			/*
3897 			 * This is tricky: not all swap backends support
3898 			 * concurrent page modifications while under writeback.
3899 			 *
3900 			 * So if we stumble over such a page in the swapcache
3901 			 * we must not set the page exclusive, otherwise we can
3902 			 * map it writable without further checks and modify it
3903 			 * while still under writeback.
3904 			 *
3905 			 * For these problematic swap backends, simply drop the
3906 			 * exclusive marker: this is perfectly fine as we start
3907 			 * writeback only if we fully unmapped the page and
3908 			 * there are no unexpected references on the page after
3909 			 * unmapping succeeded. After fully unmapped, no
3910 			 * further GUP references (FOLL_GET and FOLL_PIN) can
3911 			 * appear, so dropping the exclusive marker and mapping
3912 			 * it only R/O is fine.
3913 			 */
3914 			exclusive = false;
3915 		}
3916 	}
3917 
3918 	/*
3919 	 * Remove the swap entry and conditionally try to free up the swapcache.
3920 	 * We're already holding a reference on the page but haven't mapped it
3921 	 * yet.
3922 	 */
3923 	swap_free(entry);
3924 	if (should_try_to_free_swap(page, vma, vmf->flags))
3925 		try_to_free_swap(page);
3926 
3927 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3928 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3929 	pte = mk_pte(page, vma->vm_page_prot);
3930 
3931 	/*
3932 	 * Same logic as in do_wp_page(); however, optimize for pages that are
3933 	 * certainly not shared either because we just allocated them without
3934 	 * exposing them to the swapcache or because the swap entry indicates
3935 	 * exclusivity.
3936 	 */
3937 	if (!PageKsm(page) && (exclusive || page_count(page) == 1)) {
3938 		if (vmf->flags & FAULT_FLAG_WRITE) {
3939 			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3940 			vmf->flags &= ~FAULT_FLAG_WRITE;
3941 			ret |= VM_FAULT_WRITE;
3942 		}
3943 		rmap_flags |= RMAP_EXCLUSIVE;
3944 	}
3945 	flush_icache_page(vma, page);
3946 	if (pte_swp_soft_dirty(vmf->orig_pte))
3947 		pte = pte_mksoft_dirty(pte);
3948 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3949 		pte = pte_mkuffd_wp(pte);
3950 		pte = pte_wrprotect(pte);
3951 	}
3952 	vmf->orig_pte = pte;
3953 
3954 	/* ksm created a completely new copy */
3955 	if (unlikely(page != swapcache && swapcache)) {
3956 		page_add_new_anon_rmap(page, vma, vmf->address);
3957 		lru_cache_add_inactive_or_unevictable(page, vma);
3958 	} else {
3959 		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
3960 	}
3961 
3962 	VM_BUG_ON(!PageAnon(page) || (pte_write(pte) && !PageAnonExclusive(page)));
3963 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3964 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3965 
3966 	unlock_page(page);
3967 	if (page != swapcache && swapcache) {
3968 		/*
3969 		 * Hold the lock to avoid the swap entry to be reused
3970 		 * until we take the PT lock for the pte_same() check
3971 		 * (to avoid false positives from pte_same). For
3972 		 * further safety release the lock after the swap_free
3973 		 * so that the swap count won't change under a
3974 		 * parallel locked swapcache.
3975 		 */
3976 		unlock_page(swapcache);
3977 		put_page(swapcache);
3978 	}
3979 
3980 	if (vmf->flags & FAULT_FLAG_WRITE) {
3981 		ret |= do_wp_page(vmf);
3982 		if (ret & VM_FAULT_ERROR)
3983 			ret &= VM_FAULT_ERROR;
3984 		goto out;
3985 	}
3986 
3987 	/* No need to invalidate - it was non-present before */
3988 	update_mmu_cache(vma, vmf->address, vmf->pte);
3989 unlock:
3990 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3991 out:
3992 	if (si)
3993 		put_swap_device(si);
3994 	return ret;
3995 out_nomap:
3996 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3997 out_page:
3998 	unlock_page(page);
3999 out_release:
4000 	put_page(page);
4001 	if (page != swapcache && swapcache) {
4002 		unlock_page(swapcache);
4003 		put_page(swapcache);
4004 	}
4005 	if (si)
4006 		put_swap_device(si);
4007 	return ret;
4008 }
4009 
4010 /*
4011  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4012  * but allow concurrent faults), and pte mapped but not yet locked.
4013  * We return with mmap_lock still held, but pte unmapped and unlocked.
4014  */
4015 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4016 {
4017 	struct vm_area_struct *vma = vmf->vma;
4018 	struct page *page;
4019 	vm_fault_t ret = 0;
4020 	pte_t entry;
4021 
4022 	/* File mapping without ->vm_ops ? */
4023 	if (vma->vm_flags & VM_SHARED)
4024 		return VM_FAULT_SIGBUS;
4025 
4026 	/*
4027 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
4028 	 * pte_offset_map() on pmds where a huge pmd might be created
4029 	 * from a different thread.
4030 	 *
4031 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
4032 	 * parallel threads are excluded by other means.
4033 	 *
4034 	 * Here we only have mmap_read_lock(mm).
4035 	 */
4036 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4037 		return VM_FAULT_OOM;
4038 
4039 	/* See comment in handle_pte_fault() */
4040 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
4041 		return 0;
4042 
4043 	/* Use the zero-page for reads */
4044 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4045 			!mm_forbids_zeropage(vma->vm_mm)) {
4046 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4047 						vma->vm_page_prot));
4048 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4049 				vmf->address, &vmf->ptl);
4050 		if (!pte_none(*vmf->pte)) {
4051 			update_mmu_tlb(vma, vmf->address, vmf->pte);
4052 			goto unlock;
4053 		}
4054 		ret = check_stable_address_space(vma->vm_mm);
4055 		if (ret)
4056 			goto unlock;
4057 		/* Deliver the page fault to userland, check inside PT lock */
4058 		if (userfaultfd_missing(vma)) {
4059 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4060 			return handle_userfault(vmf, VM_UFFD_MISSING);
4061 		}
4062 		goto setpte;
4063 	}
4064 
4065 	/* Allocate our own private page. */
4066 	if (unlikely(anon_vma_prepare(vma)))
4067 		goto oom;
4068 	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
4069 	if (!page)
4070 		goto oom;
4071 
4072 	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
4073 		goto oom_free_page;
4074 	cgroup_throttle_swaprate(page, GFP_KERNEL);
4075 
4076 	/*
4077 	 * The memory barrier inside __SetPageUptodate makes sure that
4078 	 * preceding stores to the page contents become visible before
4079 	 * the set_pte_at() write.
4080 	 */
4081 	__SetPageUptodate(page);
4082 
4083 	entry = mk_pte(page, vma->vm_page_prot);
4084 	entry = pte_sw_mkyoung(entry);
4085 	if (vma->vm_flags & VM_WRITE)
4086 		entry = pte_mkwrite(pte_mkdirty(entry));
4087 
4088 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4089 			&vmf->ptl);
4090 	if (!pte_none(*vmf->pte)) {
4091 		update_mmu_cache(vma, vmf->address, vmf->pte);
4092 		goto release;
4093 	}
4094 
4095 	ret = check_stable_address_space(vma->vm_mm);
4096 	if (ret)
4097 		goto release;
4098 
4099 	/* Deliver the page fault to userland, check inside PT lock */
4100 	if (userfaultfd_missing(vma)) {
4101 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4102 		put_page(page);
4103 		return handle_userfault(vmf, VM_UFFD_MISSING);
4104 	}
4105 
4106 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4107 	page_add_new_anon_rmap(page, vma, vmf->address);
4108 	lru_cache_add_inactive_or_unevictable(page, vma);
4109 setpte:
4110 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4111 
4112 	/* No need to invalidate - it was non-present before */
4113 	update_mmu_cache(vma, vmf->address, vmf->pte);
4114 unlock:
4115 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4116 	return ret;
4117 release:
4118 	put_page(page);
4119 	goto unlock;
4120 oom_free_page:
4121 	put_page(page);
4122 oom:
4123 	return VM_FAULT_OOM;
4124 }
4125 
4126 /*
4127  * The mmap_lock must have been held on entry, and may have been
4128  * released depending on flags and vma->vm_ops->fault() return value.
4129  * See filemap_fault() and __lock_page_retry().
4130  */
4131 static vm_fault_t __do_fault(struct vm_fault *vmf)
4132 {
4133 	struct vm_area_struct *vma = vmf->vma;
4134 	vm_fault_t ret;
4135 
4136 	/*
4137 	 * Preallocate pte before we take page_lock because this might lead to
4138 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4139 	 *				lock_page(A)
4140 	 *				SetPageWriteback(A)
4141 	 *				unlock_page(A)
4142 	 * lock_page(B)
4143 	 *				lock_page(B)
4144 	 * pte_alloc_one
4145 	 *   shrink_page_list
4146 	 *     wait_on_page_writeback(A)
4147 	 *				SetPageWriteback(B)
4148 	 *				unlock_page(B)
4149 	 *				# flush A, B to clear the writeback
4150 	 */
4151 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4152 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4153 		if (!vmf->prealloc_pte)
4154 			return VM_FAULT_OOM;
4155 	}
4156 
4157 	ret = vma->vm_ops->fault(vmf);
4158 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4159 			    VM_FAULT_DONE_COW)))
4160 		return ret;
4161 
4162 	if (unlikely(PageHWPoison(vmf->page))) {
4163 		struct page *page = vmf->page;
4164 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4165 		if (ret & VM_FAULT_LOCKED) {
4166 			if (page_mapped(page))
4167 				unmap_mapping_pages(page_mapping(page),
4168 						    page->index, 1, false);
4169 			/* Retry if a clean page was removed from the cache. */
4170 			if (invalidate_inode_page(page))
4171 				poisonret = VM_FAULT_NOPAGE;
4172 			unlock_page(page);
4173 		}
4174 		put_page(page);
4175 		vmf->page = NULL;
4176 		return poisonret;
4177 	}
4178 
4179 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4180 		lock_page(vmf->page);
4181 	else
4182 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4183 
4184 	return ret;
4185 }
4186 
4187 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4188 static void deposit_prealloc_pte(struct vm_fault *vmf)
4189 {
4190 	struct vm_area_struct *vma = vmf->vma;
4191 
4192 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4193 	/*
4194 	 * We are going to consume the prealloc table,
4195 	 * count that as nr_ptes.
4196 	 */
4197 	mm_inc_nr_ptes(vma->vm_mm);
4198 	vmf->prealloc_pte = NULL;
4199 }
4200 
4201 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4202 {
4203 	struct vm_area_struct *vma = vmf->vma;
4204 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4205 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4206 	pmd_t entry;
4207 	int i;
4208 	vm_fault_t ret = VM_FAULT_FALLBACK;
4209 
4210 	if (!transhuge_vma_suitable(vma, haddr))
4211 		return ret;
4212 
4213 	page = compound_head(page);
4214 	if (compound_order(page) != HPAGE_PMD_ORDER)
4215 		return ret;
4216 
4217 	/*
4218 	 * Just backoff if any subpage of a THP is corrupted otherwise
4219 	 * the corrupted page may mapped by PMD silently to escape the
4220 	 * check.  This kind of THP just can be PTE mapped.  Access to
4221 	 * the corrupted subpage should trigger SIGBUS as expected.
4222 	 */
4223 	if (unlikely(PageHasHWPoisoned(page)))
4224 		return ret;
4225 
4226 	/*
4227 	 * Archs like ppc64 need additional space to store information
4228 	 * related to pte entry. Use the preallocated table for that.
4229 	 */
4230 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4231 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4232 		if (!vmf->prealloc_pte)
4233 			return VM_FAULT_OOM;
4234 	}
4235 
4236 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4237 	if (unlikely(!pmd_none(*vmf->pmd)))
4238 		goto out;
4239 
4240 	for (i = 0; i < HPAGE_PMD_NR; i++)
4241 		flush_icache_page(vma, page + i);
4242 
4243 	entry = mk_huge_pmd(page, vma->vm_page_prot);
4244 	if (write)
4245 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4246 
4247 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4248 	page_add_file_rmap(page, vma, true);
4249 
4250 	/*
4251 	 * deposit and withdraw with pmd lock held
4252 	 */
4253 	if (arch_needs_pgtable_deposit())
4254 		deposit_prealloc_pte(vmf);
4255 
4256 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4257 
4258 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4259 
4260 	/* fault is handled */
4261 	ret = 0;
4262 	count_vm_event(THP_FILE_MAPPED);
4263 out:
4264 	spin_unlock(vmf->ptl);
4265 	return ret;
4266 }
4267 #else
4268 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4269 {
4270 	return VM_FAULT_FALLBACK;
4271 }
4272 #endif
4273 
4274 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
4275 {
4276 	struct vm_area_struct *vma = vmf->vma;
4277 	bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte);
4278 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4279 	bool prefault = vmf->address != addr;
4280 	pte_t entry;
4281 
4282 	flush_icache_page(vma, page);
4283 	entry = mk_pte(page, vma->vm_page_prot);
4284 
4285 	if (prefault && arch_wants_old_prefaulted_pte())
4286 		entry = pte_mkold(entry);
4287 	else
4288 		entry = pte_sw_mkyoung(entry);
4289 
4290 	if (write)
4291 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4292 	if (unlikely(uffd_wp))
4293 		entry = pte_mkuffd_wp(pte_wrprotect(entry));
4294 	/* copy-on-write page */
4295 	if (write && !(vma->vm_flags & VM_SHARED)) {
4296 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4297 		page_add_new_anon_rmap(page, vma, addr);
4298 		lru_cache_add_inactive_or_unevictable(page, vma);
4299 	} else {
4300 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
4301 		page_add_file_rmap(page, vma, false);
4302 	}
4303 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
4304 }
4305 
4306 static bool vmf_pte_changed(struct vm_fault *vmf)
4307 {
4308 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
4309 		return !pte_same(*vmf->pte, vmf->orig_pte);
4310 
4311 	return !pte_none(*vmf->pte);
4312 }
4313 
4314 /**
4315  * finish_fault - finish page fault once we have prepared the page to fault
4316  *
4317  * @vmf: structure describing the fault
4318  *
4319  * This function handles all that is needed to finish a page fault once the
4320  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4321  * given page, adds reverse page mapping, handles memcg charges and LRU
4322  * addition.
4323  *
4324  * The function expects the page to be locked and on success it consumes a
4325  * reference of a page being mapped (for the PTE which maps it).
4326  *
4327  * Return: %0 on success, %VM_FAULT_ code in case of error.
4328  */
4329 vm_fault_t finish_fault(struct vm_fault *vmf)
4330 {
4331 	struct vm_area_struct *vma = vmf->vma;
4332 	struct page *page;
4333 	vm_fault_t ret;
4334 
4335 	/* Did we COW the page? */
4336 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4337 		page = vmf->cow_page;
4338 	else
4339 		page = vmf->page;
4340 
4341 	/*
4342 	 * check even for read faults because we might have lost our CoWed
4343 	 * page
4344 	 */
4345 	if (!(vma->vm_flags & VM_SHARED)) {
4346 		ret = check_stable_address_space(vma->vm_mm);
4347 		if (ret)
4348 			return ret;
4349 	}
4350 
4351 	if (pmd_none(*vmf->pmd)) {
4352 		if (PageTransCompound(page)) {
4353 			ret = do_set_pmd(vmf, page);
4354 			if (ret != VM_FAULT_FALLBACK)
4355 				return ret;
4356 		}
4357 
4358 		if (vmf->prealloc_pte)
4359 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4360 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4361 			return VM_FAULT_OOM;
4362 	}
4363 
4364 	/* See comment in handle_pte_fault() */
4365 	if (pmd_devmap_trans_unstable(vmf->pmd))
4366 		return 0;
4367 
4368 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4369 				      vmf->address, &vmf->ptl);
4370 	ret = 0;
4371 	/* Re-check under ptl */
4372 	if (likely(!vmf_pte_changed(vmf)))
4373 		do_set_pte(vmf, page, vmf->address);
4374 	else
4375 		ret = VM_FAULT_NOPAGE;
4376 
4377 	update_mmu_tlb(vma, vmf->address, vmf->pte);
4378 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4379 	return ret;
4380 }
4381 
4382 static unsigned long fault_around_bytes __read_mostly =
4383 	rounddown_pow_of_two(65536);
4384 
4385 #ifdef CONFIG_DEBUG_FS
4386 static int fault_around_bytes_get(void *data, u64 *val)
4387 {
4388 	*val = fault_around_bytes;
4389 	return 0;
4390 }
4391 
4392 /*
4393  * fault_around_bytes must be rounded down to the nearest page order as it's
4394  * what do_fault_around() expects to see.
4395  */
4396 static int fault_around_bytes_set(void *data, u64 val)
4397 {
4398 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4399 		return -EINVAL;
4400 	if (val > PAGE_SIZE)
4401 		fault_around_bytes = rounddown_pow_of_two(val);
4402 	else
4403 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4404 	return 0;
4405 }
4406 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4407 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4408 
4409 static int __init fault_around_debugfs(void)
4410 {
4411 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4412 				   &fault_around_bytes_fops);
4413 	return 0;
4414 }
4415 late_initcall(fault_around_debugfs);
4416 #endif
4417 
4418 /*
4419  * do_fault_around() tries to map few pages around the fault address. The hope
4420  * is that the pages will be needed soon and this will lower the number of
4421  * faults to handle.
4422  *
4423  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4424  * not ready to be mapped: not up-to-date, locked, etc.
4425  *
4426  * This function is called with the page table lock taken. In the split ptlock
4427  * case the page table lock only protects only those entries which belong to
4428  * the page table corresponding to the fault address.
4429  *
4430  * This function doesn't cross the VMA boundaries, in order to call map_pages()
4431  * only once.
4432  *
4433  * fault_around_bytes defines how many bytes we'll try to map.
4434  * do_fault_around() expects it to be set to a power of two less than or equal
4435  * to PTRS_PER_PTE.
4436  *
4437  * The virtual address of the area that we map is naturally aligned to
4438  * fault_around_bytes rounded down to the machine page size
4439  * (and therefore to page order).  This way it's easier to guarantee
4440  * that we don't cross page table boundaries.
4441  */
4442 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4443 {
4444 	unsigned long address = vmf->address, nr_pages, mask;
4445 	pgoff_t start_pgoff = vmf->pgoff;
4446 	pgoff_t end_pgoff;
4447 	int off;
4448 
4449 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4450 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4451 
4452 	address = max(address & mask, vmf->vma->vm_start);
4453 	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4454 	start_pgoff -= off;
4455 
4456 	/*
4457 	 *  end_pgoff is either the end of the page table, the end of
4458 	 *  the vma or nr_pages from start_pgoff, depending what is nearest.
4459 	 */
4460 	end_pgoff = start_pgoff -
4461 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4462 		PTRS_PER_PTE - 1;
4463 	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4464 			start_pgoff + nr_pages - 1);
4465 
4466 	if (pmd_none(*vmf->pmd)) {
4467 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4468 		if (!vmf->prealloc_pte)
4469 			return VM_FAULT_OOM;
4470 	}
4471 
4472 	return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4473 }
4474 
4475 /* Return true if we should do read fault-around, false otherwise */
4476 static inline bool should_fault_around(struct vm_fault *vmf)
4477 {
4478 	/* No ->map_pages?  No way to fault around... */
4479 	if (!vmf->vma->vm_ops->map_pages)
4480 		return false;
4481 
4482 	if (uffd_disable_fault_around(vmf->vma))
4483 		return false;
4484 
4485 	return fault_around_bytes >> PAGE_SHIFT > 1;
4486 }
4487 
4488 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4489 {
4490 	vm_fault_t ret = 0;
4491 
4492 	/*
4493 	 * Let's call ->map_pages() first and use ->fault() as fallback
4494 	 * if page by the offset is not ready to be mapped (cold cache or
4495 	 * something).
4496 	 */
4497 	if (should_fault_around(vmf)) {
4498 		ret = do_fault_around(vmf);
4499 		if (ret)
4500 			return ret;
4501 	}
4502 
4503 	ret = __do_fault(vmf);
4504 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4505 		return ret;
4506 
4507 	ret |= finish_fault(vmf);
4508 	unlock_page(vmf->page);
4509 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4510 		put_page(vmf->page);
4511 	return ret;
4512 }
4513 
4514 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4515 {
4516 	struct vm_area_struct *vma = vmf->vma;
4517 	vm_fault_t ret;
4518 
4519 	if (unlikely(anon_vma_prepare(vma)))
4520 		return VM_FAULT_OOM;
4521 
4522 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4523 	if (!vmf->cow_page)
4524 		return VM_FAULT_OOM;
4525 
4526 	if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4527 				GFP_KERNEL)) {
4528 		put_page(vmf->cow_page);
4529 		return VM_FAULT_OOM;
4530 	}
4531 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4532 
4533 	ret = __do_fault(vmf);
4534 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4535 		goto uncharge_out;
4536 	if (ret & VM_FAULT_DONE_COW)
4537 		return ret;
4538 
4539 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4540 	__SetPageUptodate(vmf->cow_page);
4541 
4542 	ret |= finish_fault(vmf);
4543 	unlock_page(vmf->page);
4544 	put_page(vmf->page);
4545 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4546 		goto uncharge_out;
4547 	return ret;
4548 uncharge_out:
4549 	put_page(vmf->cow_page);
4550 	return ret;
4551 }
4552 
4553 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4554 {
4555 	struct vm_area_struct *vma = vmf->vma;
4556 	vm_fault_t ret, tmp;
4557 
4558 	ret = __do_fault(vmf);
4559 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4560 		return ret;
4561 
4562 	/*
4563 	 * Check if the backing address space wants to know that the page is
4564 	 * about to become writable
4565 	 */
4566 	if (vma->vm_ops->page_mkwrite) {
4567 		unlock_page(vmf->page);
4568 		tmp = do_page_mkwrite(vmf);
4569 		if (unlikely(!tmp ||
4570 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4571 			put_page(vmf->page);
4572 			return tmp;
4573 		}
4574 	}
4575 
4576 	ret |= finish_fault(vmf);
4577 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4578 					VM_FAULT_RETRY))) {
4579 		unlock_page(vmf->page);
4580 		put_page(vmf->page);
4581 		return ret;
4582 	}
4583 
4584 	ret |= fault_dirty_shared_page(vmf);
4585 	return ret;
4586 }
4587 
4588 /*
4589  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4590  * but allow concurrent faults).
4591  * The mmap_lock may have been released depending on flags and our
4592  * return value.  See filemap_fault() and __folio_lock_or_retry().
4593  * If mmap_lock is released, vma may become invalid (for example
4594  * by other thread calling munmap()).
4595  */
4596 static vm_fault_t do_fault(struct vm_fault *vmf)
4597 {
4598 	struct vm_area_struct *vma = vmf->vma;
4599 	struct mm_struct *vm_mm = vma->vm_mm;
4600 	vm_fault_t ret;
4601 
4602 	/*
4603 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4604 	 */
4605 	if (!vma->vm_ops->fault) {
4606 		/*
4607 		 * If we find a migration pmd entry or a none pmd entry, which
4608 		 * should never happen, return SIGBUS
4609 		 */
4610 		if (unlikely(!pmd_present(*vmf->pmd)))
4611 			ret = VM_FAULT_SIGBUS;
4612 		else {
4613 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4614 						       vmf->pmd,
4615 						       vmf->address,
4616 						       &vmf->ptl);
4617 			/*
4618 			 * Make sure this is not a temporary clearing of pte
4619 			 * by holding ptl and checking again. A R/M/W update
4620 			 * of pte involves: take ptl, clearing the pte so that
4621 			 * we don't have concurrent modification by hardware
4622 			 * followed by an update.
4623 			 */
4624 			if (unlikely(pte_none(*vmf->pte)))
4625 				ret = VM_FAULT_SIGBUS;
4626 			else
4627 				ret = VM_FAULT_NOPAGE;
4628 
4629 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4630 		}
4631 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4632 		ret = do_read_fault(vmf);
4633 	else if (!(vma->vm_flags & VM_SHARED))
4634 		ret = do_cow_fault(vmf);
4635 	else
4636 		ret = do_shared_fault(vmf);
4637 
4638 	/* preallocated pagetable is unused: free it */
4639 	if (vmf->prealloc_pte) {
4640 		pte_free(vm_mm, vmf->prealloc_pte);
4641 		vmf->prealloc_pte = NULL;
4642 	}
4643 	return ret;
4644 }
4645 
4646 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4647 		      unsigned long addr, int page_nid, int *flags)
4648 {
4649 	get_page(page);
4650 
4651 	count_vm_numa_event(NUMA_HINT_FAULTS);
4652 	if (page_nid == numa_node_id()) {
4653 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4654 		*flags |= TNF_FAULT_LOCAL;
4655 	}
4656 
4657 	return mpol_misplaced(page, vma, addr);
4658 }
4659 
4660 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4661 {
4662 	struct vm_area_struct *vma = vmf->vma;
4663 	struct page *page = NULL;
4664 	int page_nid = NUMA_NO_NODE;
4665 	int last_cpupid;
4666 	int target_nid;
4667 	pte_t pte, old_pte;
4668 	bool was_writable = pte_savedwrite(vmf->orig_pte);
4669 	int flags = 0;
4670 
4671 	/*
4672 	 * The "pte" at this point cannot be used safely without
4673 	 * validation through pte_unmap_same(). It's of NUMA type but
4674 	 * the pfn may be screwed if the read is non atomic.
4675 	 */
4676 	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4677 	spin_lock(vmf->ptl);
4678 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4679 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4680 		goto out;
4681 	}
4682 
4683 	/* Get the normal PTE  */
4684 	old_pte = ptep_get(vmf->pte);
4685 	pte = pte_modify(old_pte, vma->vm_page_prot);
4686 
4687 	page = vm_normal_page(vma, vmf->address, pte);
4688 	if (!page)
4689 		goto out_map;
4690 
4691 	/* TODO: handle PTE-mapped THP */
4692 	if (PageCompound(page))
4693 		goto out_map;
4694 
4695 	/*
4696 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4697 	 * much anyway since they can be in shared cache state. This misses
4698 	 * the case where a mapping is writable but the process never writes
4699 	 * to it but pte_write gets cleared during protection updates and
4700 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4701 	 * background writeback, dirty balancing and application behaviour.
4702 	 */
4703 	if (!was_writable)
4704 		flags |= TNF_NO_GROUP;
4705 
4706 	/*
4707 	 * Flag if the page is shared between multiple address spaces. This
4708 	 * is later used when determining whether to group tasks together
4709 	 */
4710 	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4711 		flags |= TNF_SHARED;
4712 
4713 	last_cpupid = page_cpupid_last(page);
4714 	page_nid = page_to_nid(page);
4715 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4716 			&flags);
4717 	if (target_nid == NUMA_NO_NODE) {
4718 		put_page(page);
4719 		goto out_map;
4720 	}
4721 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4722 
4723 	/* Migrate to the requested node */
4724 	if (migrate_misplaced_page(page, vma, target_nid)) {
4725 		page_nid = target_nid;
4726 		flags |= TNF_MIGRATED;
4727 	} else {
4728 		flags |= TNF_MIGRATE_FAIL;
4729 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4730 		spin_lock(vmf->ptl);
4731 		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4732 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4733 			goto out;
4734 		}
4735 		goto out_map;
4736 	}
4737 
4738 out:
4739 	if (page_nid != NUMA_NO_NODE)
4740 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4741 	return 0;
4742 out_map:
4743 	/*
4744 	 * Make it present again, depending on how arch implements
4745 	 * non-accessible ptes, some can allow access by kernel mode.
4746 	 */
4747 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4748 	pte = pte_modify(old_pte, vma->vm_page_prot);
4749 	pte = pte_mkyoung(pte);
4750 	if (was_writable)
4751 		pte = pte_mkwrite(pte);
4752 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4753 	update_mmu_cache(vma, vmf->address, vmf->pte);
4754 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4755 	goto out;
4756 }
4757 
4758 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4759 {
4760 	if (vma_is_anonymous(vmf->vma))
4761 		return do_huge_pmd_anonymous_page(vmf);
4762 	if (vmf->vma->vm_ops->huge_fault)
4763 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4764 	return VM_FAULT_FALLBACK;
4765 }
4766 
4767 /* `inline' is required to avoid gcc 4.1.2 build error */
4768 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4769 {
4770 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
4771 
4772 	if (vma_is_anonymous(vmf->vma)) {
4773 		if (likely(!unshare) &&
4774 		    userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
4775 			return handle_userfault(vmf, VM_UFFD_WP);
4776 		return do_huge_pmd_wp_page(vmf);
4777 	}
4778 	if (vmf->vma->vm_ops->huge_fault) {
4779 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4780 
4781 		if (!(ret & VM_FAULT_FALLBACK))
4782 			return ret;
4783 	}
4784 
4785 	/* COW or write-notify handled on pte level: split pmd. */
4786 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4787 
4788 	return VM_FAULT_FALLBACK;
4789 }
4790 
4791 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4792 {
4793 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4794 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4795 	/* No support for anonymous transparent PUD pages yet */
4796 	if (vma_is_anonymous(vmf->vma))
4797 		goto split;
4798 	if (vmf->vma->vm_ops->huge_fault) {
4799 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4800 
4801 		if (!(ret & VM_FAULT_FALLBACK))
4802 			return ret;
4803 	}
4804 split:
4805 	/* COW or write-notify not handled on PUD level: split pud.*/
4806 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4807 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4808 	return VM_FAULT_FALLBACK;
4809 }
4810 
4811 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4812 {
4813 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4814 	/* No support for anonymous transparent PUD pages yet */
4815 	if (vma_is_anonymous(vmf->vma))
4816 		return VM_FAULT_FALLBACK;
4817 	if (vmf->vma->vm_ops->huge_fault)
4818 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4819 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4820 	return VM_FAULT_FALLBACK;
4821 }
4822 
4823 /*
4824  * These routines also need to handle stuff like marking pages dirty
4825  * and/or accessed for architectures that don't do it in hardware (most
4826  * RISC architectures).  The early dirtying is also good on the i386.
4827  *
4828  * There is also a hook called "update_mmu_cache()" that architectures
4829  * with external mmu caches can use to update those (ie the Sparc or
4830  * PowerPC hashed page tables that act as extended TLBs).
4831  *
4832  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4833  * concurrent faults).
4834  *
4835  * The mmap_lock may have been released depending on flags and our return value.
4836  * See filemap_fault() and __folio_lock_or_retry().
4837  */
4838 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4839 {
4840 	pte_t entry;
4841 
4842 	if (unlikely(pmd_none(*vmf->pmd))) {
4843 		/*
4844 		 * Leave __pte_alloc() until later: because vm_ops->fault may
4845 		 * want to allocate huge page, and if we expose page table
4846 		 * for an instant, it will be difficult to retract from
4847 		 * concurrent faults and from rmap lookups.
4848 		 */
4849 		vmf->pte = NULL;
4850 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
4851 	} else {
4852 		/*
4853 		 * If a huge pmd materialized under us just retry later.  Use
4854 		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
4855 		 * of pmd_trans_huge() to ensure the pmd didn't become
4856 		 * pmd_trans_huge under us and then back to pmd_none, as a
4857 		 * result of MADV_DONTNEED running immediately after a huge pmd
4858 		 * fault in a different thread of this mm, in turn leading to a
4859 		 * misleading pmd_trans_huge() retval. All we have to ensure is
4860 		 * that it is a regular pmd that we can walk with
4861 		 * pte_offset_map() and we can do that through an atomic read
4862 		 * in C, which is what pmd_trans_unstable() provides.
4863 		 */
4864 		if (pmd_devmap_trans_unstable(vmf->pmd))
4865 			return 0;
4866 		/*
4867 		 * A regular pmd is established and it can't morph into a huge
4868 		 * pmd from under us anymore at this point because we hold the
4869 		 * mmap_lock read mode and khugepaged takes it in write mode.
4870 		 * So now it's safe to run pte_offset_map().
4871 		 */
4872 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4873 		vmf->orig_pte = *vmf->pte;
4874 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
4875 
4876 		/*
4877 		 * some architectures can have larger ptes than wordsize,
4878 		 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4879 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4880 		 * accesses.  The code below just needs a consistent view
4881 		 * for the ifs and we later double check anyway with the
4882 		 * ptl lock held. So here a barrier will do.
4883 		 */
4884 		barrier();
4885 		if (pte_none(vmf->orig_pte)) {
4886 			pte_unmap(vmf->pte);
4887 			vmf->pte = NULL;
4888 		}
4889 	}
4890 
4891 	if (!vmf->pte) {
4892 		if (vma_is_anonymous(vmf->vma))
4893 			return do_anonymous_page(vmf);
4894 		else
4895 			return do_fault(vmf);
4896 	}
4897 
4898 	if (!pte_present(vmf->orig_pte))
4899 		return do_swap_page(vmf);
4900 
4901 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4902 		return do_numa_page(vmf);
4903 
4904 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4905 	spin_lock(vmf->ptl);
4906 	entry = vmf->orig_pte;
4907 	if (unlikely(!pte_same(*vmf->pte, entry))) {
4908 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4909 		goto unlock;
4910 	}
4911 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
4912 		if (!pte_write(entry))
4913 			return do_wp_page(vmf);
4914 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
4915 			entry = pte_mkdirty(entry);
4916 	}
4917 	entry = pte_mkyoung(entry);
4918 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4919 				vmf->flags & FAULT_FLAG_WRITE)) {
4920 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4921 	} else {
4922 		/* Skip spurious TLB flush for retried page fault */
4923 		if (vmf->flags & FAULT_FLAG_TRIED)
4924 			goto unlock;
4925 		/*
4926 		 * This is needed only for protection faults but the arch code
4927 		 * is not yet telling us if this is a protection fault or not.
4928 		 * This still avoids useless tlb flushes for .text page faults
4929 		 * with threads.
4930 		 */
4931 		if (vmf->flags & FAULT_FLAG_WRITE)
4932 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4933 	}
4934 unlock:
4935 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4936 	return 0;
4937 }
4938 
4939 /*
4940  * By the time we get here, we already hold the mm semaphore
4941  *
4942  * The mmap_lock may have been released depending on flags and our
4943  * return value.  See filemap_fault() and __folio_lock_or_retry().
4944  */
4945 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4946 		unsigned long address, unsigned int flags)
4947 {
4948 	struct vm_fault vmf = {
4949 		.vma = vma,
4950 		.address = address & PAGE_MASK,
4951 		.real_address = address,
4952 		.flags = flags,
4953 		.pgoff = linear_page_index(vma, address),
4954 		.gfp_mask = __get_fault_gfp_mask(vma),
4955 	};
4956 	struct mm_struct *mm = vma->vm_mm;
4957 	pgd_t *pgd;
4958 	p4d_t *p4d;
4959 	vm_fault_t ret;
4960 
4961 	pgd = pgd_offset(mm, address);
4962 	p4d = p4d_alloc(mm, pgd, address);
4963 	if (!p4d)
4964 		return VM_FAULT_OOM;
4965 
4966 	vmf.pud = pud_alloc(mm, p4d, address);
4967 	if (!vmf.pud)
4968 		return VM_FAULT_OOM;
4969 retry_pud:
4970 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4971 		ret = create_huge_pud(&vmf);
4972 		if (!(ret & VM_FAULT_FALLBACK))
4973 			return ret;
4974 	} else {
4975 		pud_t orig_pud = *vmf.pud;
4976 
4977 		barrier();
4978 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4979 
4980 			/*
4981 			 * TODO once we support anonymous PUDs: NUMA case and
4982 			 * FAULT_FLAG_UNSHARE handling.
4983 			 */
4984 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
4985 				ret = wp_huge_pud(&vmf, orig_pud);
4986 				if (!(ret & VM_FAULT_FALLBACK))
4987 					return ret;
4988 			} else {
4989 				huge_pud_set_accessed(&vmf, orig_pud);
4990 				return 0;
4991 			}
4992 		}
4993 	}
4994 
4995 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4996 	if (!vmf.pmd)
4997 		return VM_FAULT_OOM;
4998 
4999 	/* Huge pud page fault raced with pmd_alloc? */
5000 	if (pud_trans_unstable(vmf.pud))
5001 		goto retry_pud;
5002 
5003 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
5004 		ret = create_huge_pmd(&vmf);
5005 		if (!(ret & VM_FAULT_FALLBACK))
5006 			return ret;
5007 	} else {
5008 		vmf.orig_pmd = *vmf.pmd;
5009 
5010 		barrier();
5011 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
5012 			VM_BUG_ON(thp_migration_supported() &&
5013 					  !is_pmd_migration_entry(vmf.orig_pmd));
5014 			if (is_pmd_migration_entry(vmf.orig_pmd))
5015 				pmd_migration_entry_wait(mm, vmf.pmd);
5016 			return 0;
5017 		}
5018 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5019 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5020 				return do_huge_pmd_numa_page(&vmf);
5021 
5022 			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5023 			    !pmd_write(vmf.orig_pmd)) {
5024 				ret = wp_huge_pmd(&vmf);
5025 				if (!(ret & VM_FAULT_FALLBACK))
5026 					return ret;
5027 			} else {
5028 				huge_pmd_set_accessed(&vmf);
5029 				return 0;
5030 			}
5031 		}
5032 	}
5033 
5034 	return handle_pte_fault(&vmf);
5035 }
5036 
5037 /**
5038  * mm_account_fault - Do page fault accounting
5039  *
5040  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
5041  *        of perf event counters, but we'll still do the per-task accounting to
5042  *        the task who triggered this page fault.
5043  * @address: the faulted address.
5044  * @flags: the fault flags.
5045  * @ret: the fault retcode.
5046  *
5047  * This will take care of most of the page fault accounting.  Meanwhile, it
5048  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
5049  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
5050  * still be in per-arch page fault handlers at the entry of page fault.
5051  */
5052 static inline void mm_account_fault(struct pt_regs *regs,
5053 				    unsigned long address, unsigned int flags,
5054 				    vm_fault_t ret)
5055 {
5056 	bool major;
5057 
5058 	/*
5059 	 * We don't do accounting for some specific faults:
5060 	 *
5061 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
5062 	 *   includes arch_vma_access_permitted() failing before reaching here.
5063 	 *   So this is not a "this many hardware page faults" counter.  We
5064 	 *   should use the hw profiling for that.
5065 	 *
5066 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
5067 	 *   once they're completed.
5068 	 */
5069 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
5070 		return;
5071 
5072 	/*
5073 	 * We define the fault as a major fault when the final successful fault
5074 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5075 	 * handle it immediately previously).
5076 	 */
5077 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5078 
5079 	if (major)
5080 		current->maj_flt++;
5081 	else
5082 		current->min_flt++;
5083 
5084 	/*
5085 	 * If the fault is done for GUP, regs will be NULL.  We only do the
5086 	 * accounting for the per thread fault counters who triggered the
5087 	 * fault, and we skip the perf event updates.
5088 	 */
5089 	if (!regs)
5090 		return;
5091 
5092 	if (major)
5093 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
5094 	else
5095 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
5096 }
5097 
5098 /*
5099  * By the time we get here, we already hold the mm semaphore
5100  *
5101  * The mmap_lock may have been released depending on flags and our
5102  * return value.  See filemap_fault() and __folio_lock_or_retry().
5103  */
5104 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5105 			   unsigned int flags, struct pt_regs *regs)
5106 {
5107 	vm_fault_t ret;
5108 
5109 	__set_current_state(TASK_RUNNING);
5110 
5111 	count_vm_event(PGFAULT);
5112 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
5113 
5114 	/* do counter updates before entering really critical section. */
5115 	check_sync_rss_stat(current);
5116 
5117 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5118 					    flags & FAULT_FLAG_INSTRUCTION,
5119 					    flags & FAULT_FLAG_REMOTE))
5120 		return VM_FAULT_SIGSEGV;
5121 
5122 	/*
5123 	 * Enable the memcg OOM handling for faults triggered in user
5124 	 * space.  Kernel faults are handled more gracefully.
5125 	 */
5126 	if (flags & FAULT_FLAG_USER)
5127 		mem_cgroup_enter_user_fault();
5128 
5129 	if (unlikely(is_vm_hugetlb_page(vma)))
5130 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5131 	else
5132 		ret = __handle_mm_fault(vma, address, flags);
5133 
5134 	if (flags & FAULT_FLAG_USER) {
5135 		mem_cgroup_exit_user_fault();
5136 		/*
5137 		 * The task may have entered a memcg OOM situation but
5138 		 * if the allocation error was handled gracefully (no
5139 		 * VM_FAULT_OOM), there is no need to kill anything.
5140 		 * Just clean up the OOM state peacefully.
5141 		 */
5142 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5143 			mem_cgroup_oom_synchronize(false);
5144 	}
5145 
5146 	mm_account_fault(regs, address, flags, ret);
5147 
5148 	return ret;
5149 }
5150 EXPORT_SYMBOL_GPL(handle_mm_fault);
5151 
5152 #ifndef __PAGETABLE_P4D_FOLDED
5153 /*
5154  * Allocate p4d page table.
5155  * We've already handled the fast-path in-line.
5156  */
5157 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5158 {
5159 	p4d_t *new = p4d_alloc_one(mm, address);
5160 	if (!new)
5161 		return -ENOMEM;
5162 
5163 	spin_lock(&mm->page_table_lock);
5164 	if (pgd_present(*pgd)) {	/* Another has populated it */
5165 		p4d_free(mm, new);
5166 	} else {
5167 		smp_wmb(); /* See comment in pmd_install() */
5168 		pgd_populate(mm, pgd, new);
5169 	}
5170 	spin_unlock(&mm->page_table_lock);
5171 	return 0;
5172 }
5173 #endif /* __PAGETABLE_P4D_FOLDED */
5174 
5175 #ifndef __PAGETABLE_PUD_FOLDED
5176 /*
5177  * Allocate page upper directory.
5178  * We've already handled the fast-path in-line.
5179  */
5180 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
5181 {
5182 	pud_t *new = pud_alloc_one(mm, address);
5183 	if (!new)
5184 		return -ENOMEM;
5185 
5186 	spin_lock(&mm->page_table_lock);
5187 	if (!p4d_present(*p4d)) {
5188 		mm_inc_nr_puds(mm);
5189 		smp_wmb(); /* See comment in pmd_install() */
5190 		p4d_populate(mm, p4d, new);
5191 	} else	/* Another has populated it */
5192 		pud_free(mm, new);
5193 	spin_unlock(&mm->page_table_lock);
5194 	return 0;
5195 }
5196 #endif /* __PAGETABLE_PUD_FOLDED */
5197 
5198 #ifndef __PAGETABLE_PMD_FOLDED
5199 /*
5200  * Allocate page middle directory.
5201  * We've already handled the fast-path in-line.
5202  */
5203 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
5204 {
5205 	spinlock_t *ptl;
5206 	pmd_t *new = pmd_alloc_one(mm, address);
5207 	if (!new)
5208 		return -ENOMEM;
5209 
5210 	ptl = pud_lock(mm, pud);
5211 	if (!pud_present(*pud)) {
5212 		mm_inc_nr_pmds(mm);
5213 		smp_wmb(); /* See comment in pmd_install() */
5214 		pud_populate(mm, pud, new);
5215 	} else {	/* Another has populated it */
5216 		pmd_free(mm, new);
5217 	}
5218 	spin_unlock(ptl);
5219 	return 0;
5220 }
5221 #endif /* __PAGETABLE_PMD_FOLDED */
5222 
5223 /**
5224  * follow_pte - look up PTE at a user virtual address
5225  * @mm: the mm_struct of the target address space
5226  * @address: user virtual address
5227  * @ptepp: location to store found PTE
5228  * @ptlp: location to store the lock for the PTE
5229  *
5230  * On a successful return, the pointer to the PTE is stored in @ptepp;
5231  * the corresponding lock is taken and its location is stored in @ptlp.
5232  * The contents of the PTE are only stable until @ptlp is released;
5233  * any further use, if any, must be protected against invalidation
5234  * with MMU notifiers.
5235  *
5236  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
5237  * should be taken for read.
5238  *
5239  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
5240  * it is not a good general-purpose API.
5241  *
5242  * Return: zero on success, -ve otherwise.
5243  */
5244 int follow_pte(struct mm_struct *mm, unsigned long address,
5245 	       pte_t **ptepp, spinlock_t **ptlp)
5246 {
5247 	pgd_t *pgd;
5248 	p4d_t *p4d;
5249 	pud_t *pud;
5250 	pmd_t *pmd;
5251 	pte_t *ptep;
5252 
5253 	pgd = pgd_offset(mm, address);
5254 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5255 		goto out;
5256 
5257 	p4d = p4d_offset(pgd, address);
5258 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5259 		goto out;
5260 
5261 	pud = pud_offset(p4d, address);
5262 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5263 		goto out;
5264 
5265 	pmd = pmd_offset(pud, address);
5266 	VM_BUG_ON(pmd_trans_huge(*pmd));
5267 
5268 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
5269 		goto out;
5270 
5271 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
5272 	if (!pte_present(*ptep))
5273 		goto unlock;
5274 	*ptepp = ptep;
5275 	return 0;
5276 unlock:
5277 	pte_unmap_unlock(ptep, *ptlp);
5278 out:
5279 	return -EINVAL;
5280 }
5281 EXPORT_SYMBOL_GPL(follow_pte);
5282 
5283 /**
5284  * follow_pfn - look up PFN at a user virtual address
5285  * @vma: memory mapping
5286  * @address: user virtual address
5287  * @pfn: location to store found PFN
5288  *
5289  * Only IO mappings and raw PFN mappings are allowed.
5290  *
5291  * This function does not allow the caller to read the permissions
5292  * of the PTE.  Do not use it.
5293  *
5294  * Return: zero and the pfn at @pfn on success, -ve otherwise.
5295  */
5296 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5297 	unsigned long *pfn)
5298 {
5299 	int ret = -EINVAL;
5300 	spinlock_t *ptl;
5301 	pte_t *ptep;
5302 
5303 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5304 		return ret;
5305 
5306 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5307 	if (ret)
5308 		return ret;
5309 	*pfn = pte_pfn(*ptep);
5310 	pte_unmap_unlock(ptep, ptl);
5311 	return 0;
5312 }
5313 EXPORT_SYMBOL(follow_pfn);
5314 
5315 #ifdef CONFIG_HAVE_IOREMAP_PROT
5316 int follow_phys(struct vm_area_struct *vma,
5317 		unsigned long address, unsigned int flags,
5318 		unsigned long *prot, resource_size_t *phys)
5319 {
5320 	int ret = -EINVAL;
5321 	pte_t *ptep, pte;
5322 	spinlock_t *ptl;
5323 
5324 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5325 		goto out;
5326 
5327 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5328 		goto out;
5329 	pte = *ptep;
5330 
5331 	if ((flags & FOLL_WRITE) && !pte_write(pte))
5332 		goto unlock;
5333 
5334 	*prot = pgprot_val(pte_pgprot(pte));
5335 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5336 
5337 	ret = 0;
5338 unlock:
5339 	pte_unmap_unlock(ptep, ptl);
5340 out:
5341 	return ret;
5342 }
5343 
5344 /**
5345  * generic_access_phys - generic implementation for iomem mmap access
5346  * @vma: the vma to access
5347  * @addr: userspace address, not relative offset within @vma
5348  * @buf: buffer to read/write
5349  * @len: length of transfer
5350  * @write: set to FOLL_WRITE when writing, otherwise reading
5351  *
5352  * This is a generic implementation for &vm_operations_struct.access for an
5353  * iomem mapping. This callback is used by access_process_vm() when the @vma is
5354  * not page based.
5355  */
5356 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5357 			void *buf, int len, int write)
5358 {
5359 	resource_size_t phys_addr;
5360 	unsigned long prot = 0;
5361 	void __iomem *maddr;
5362 	pte_t *ptep, pte;
5363 	spinlock_t *ptl;
5364 	int offset = offset_in_page(addr);
5365 	int ret = -EINVAL;
5366 
5367 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5368 		return -EINVAL;
5369 
5370 retry:
5371 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5372 		return -EINVAL;
5373 	pte = *ptep;
5374 	pte_unmap_unlock(ptep, ptl);
5375 
5376 	prot = pgprot_val(pte_pgprot(pte));
5377 	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5378 
5379 	if ((write & FOLL_WRITE) && !pte_write(pte))
5380 		return -EINVAL;
5381 
5382 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5383 	if (!maddr)
5384 		return -ENOMEM;
5385 
5386 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5387 		goto out_unmap;
5388 
5389 	if (!pte_same(pte, *ptep)) {
5390 		pte_unmap_unlock(ptep, ptl);
5391 		iounmap(maddr);
5392 
5393 		goto retry;
5394 	}
5395 
5396 	if (write)
5397 		memcpy_toio(maddr + offset, buf, len);
5398 	else
5399 		memcpy_fromio(buf, maddr + offset, len);
5400 	ret = len;
5401 	pte_unmap_unlock(ptep, ptl);
5402 out_unmap:
5403 	iounmap(maddr);
5404 
5405 	return ret;
5406 }
5407 EXPORT_SYMBOL_GPL(generic_access_phys);
5408 #endif
5409 
5410 /*
5411  * Access another process' address space as given in mm.
5412  */
5413 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5414 		       int len, unsigned int gup_flags)
5415 {
5416 	struct vm_area_struct *vma;
5417 	void *old_buf = buf;
5418 	int write = gup_flags & FOLL_WRITE;
5419 
5420 	if (mmap_read_lock_killable(mm))
5421 		return 0;
5422 
5423 	/* ignore errors, just check how much was successfully transferred */
5424 	while (len) {
5425 		int bytes, ret, offset;
5426 		void *maddr;
5427 		struct page *page = NULL;
5428 
5429 		ret = get_user_pages_remote(mm, addr, 1,
5430 				gup_flags, &page, &vma, NULL);
5431 		if (ret <= 0) {
5432 #ifndef CONFIG_HAVE_IOREMAP_PROT
5433 			break;
5434 #else
5435 			/*
5436 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5437 			 * we can access using slightly different code.
5438 			 */
5439 			vma = vma_lookup(mm, addr);
5440 			if (!vma)
5441 				break;
5442 			if (vma->vm_ops && vma->vm_ops->access)
5443 				ret = vma->vm_ops->access(vma, addr, buf,
5444 							  len, write);
5445 			if (ret <= 0)
5446 				break;
5447 			bytes = ret;
5448 #endif
5449 		} else {
5450 			bytes = len;
5451 			offset = addr & (PAGE_SIZE-1);
5452 			if (bytes > PAGE_SIZE-offset)
5453 				bytes = PAGE_SIZE-offset;
5454 
5455 			maddr = kmap(page);
5456 			if (write) {
5457 				copy_to_user_page(vma, page, addr,
5458 						  maddr + offset, buf, bytes);
5459 				set_page_dirty_lock(page);
5460 			} else {
5461 				copy_from_user_page(vma, page, addr,
5462 						    buf, maddr + offset, bytes);
5463 			}
5464 			kunmap(page);
5465 			put_page(page);
5466 		}
5467 		len -= bytes;
5468 		buf += bytes;
5469 		addr += bytes;
5470 	}
5471 	mmap_read_unlock(mm);
5472 
5473 	return buf - old_buf;
5474 }
5475 
5476 /**
5477  * access_remote_vm - access another process' address space
5478  * @mm:		the mm_struct of the target address space
5479  * @addr:	start address to access
5480  * @buf:	source or destination buffer
5481  * @len:	number of bytes to transfer
5482  * @gup_flags:	flags modifying lookup behaviour
5483  *
5484  * The caller must hold a reference on @mm.
5485  *
5486  * Return: number of bytes copied from source to destination.
5487  */
5488 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5489 		void *buf, int len, unsigned int gup_flags)
5490 {
5491 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
5492 }
5493 
5494 /*
5495  * Access another process' address space.
5496  * Source/target buffer must be kernel space,
5497  * Do not walk the page table directly, use get_user_pages
5498  */
5499 int access_process_vm(struct task_struct *tsk, unsigned long addr,
5500 		void *buf, int len, unsigned int gup_flags)
5501 {
5502 	struct mm_struct *mm;
5503 	int ret;
5504 
5505 	mm = get_task_mm(tsk);
5506 	if (!mm)
5507 		return 0;
5508 
5509 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5510 
5511 	mmput(mm);
5512 
5513 	return ret;
5514 }
5515 EXPORT_SYMBOL_GPL(access_process_vm);
5516 
5517 /*
5518  * Print the name of a VMA.
5519  */
5520 void print_vma_addr(char *prefix, unsigned long ip)
5521 {
5522 	struct mm_struct *mm = current->mm;
5523 	struct vm_area_struct *vma;
5524 
5525 	/*
5526 	 * we might be running from an atomic context so we cannot sleep
5527 	 */
5528 	if (!mmap_read_trylock(mm))
5529 		return;
5530 
5531 	vma = find_vma(mm, ip);
5532 	if (vma && vma->vm_file) {
5533 		struct file *f = vma->vm_file;
5534 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5535 		if (buf) {
5536 			char *p;
5537 
5538 			p = file_path(f, buf, PAGE_SIZE);
5539 			if (IS_ERR(p))
5540 				p = "?";
5541 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5542 					vma->vm_start,
5543 					vma->vm_end - vma->vm_start);
5544 			free_page((unsigned long)buf);
5545 		}
5546 	}
5547 	mmap_read_unlock(mm);
5548 }
5549 
5550 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5551 void __might_fault(const char *file, int line)
5552 {
5553 	if (pagefault_disabled())
5554 		return;
5555 	__might_sleep(file, line);
5556 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5557 	if (current->mm)
5558 		might_lock_read(&current->mm->mmap_lock);
5559 #endif
5560 }
5561 EXPORT_SYMBOL(__might_fault);
5562 #endif
5563 
5564 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5565 /*
5566  * Process all subpages of the specified huge page with the specified
5567  * operation.  The target subpage will be processed last to keep its
5568  * cache lines hot.
5569  */
5570 static inline void process_huge_page(
5571 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5572 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5573 	void *arg)
5574 {
5575 	int i, n, base, l;
5576 	unsigned long addr = addr_hint &
5577 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5578 
5579 	/* Process target subpage last to keep its cache lines hot */
5580 	might_sleep();
5581 	n = (addr_hint - addr) / PAGE_SIZE;
5582 	if (2 * n <= pages_per_huge_page) {
5583 		/* If target subpage in first half of huge page */
5584 		base = 0;
5585 		l = n;
5586 		/* Process subpages at the end of huge page */
5587 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5588 			cond_resched();
5589 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5590 		}
5591 	} else {
5592 		/* If target subpage in second half of huge page */
5593 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5594 		l = pages_per_huge_page - n;
5595 		/* Process subpages at the begin of huge page */
5596 		for (i = 0; i < base; i++) {
5597 			cond_resched();
5598 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5599 		}
5600 	}
5601 	/*
5602 	 * Process remaining subpages in left-right-left-right pattern
5603 	 * towards the target subpage
5604 	 */
5605 	for (i = 0; i < l; i++) {
5606 		int left_idx = base + i;
5607 		int right_idx = base + 2 * l - 1 - i;
5608 
5609 		cond_resched();
5610 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5611 		cond_resched();
5612 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5613 	}
5614 }
5615 
5616 static void clear_gigantic_page(struct page *page,
5617 				unsigned long addr,
5618 				unsigned int pages_per_huge_page)
5619 {
5620 	int i;
5621 	struct page *p = page;
5622 
5623 	might_sleep();
5624 	for (i = 0; i < pages_per_huge_page;
5625 	     i++, p = mem_map_next(p, page, i)) {
5626 		cond_resched();
5627 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5628 	}
5629 }
5630 
5631 static void clear_subpage(unsigned long addr, int idx, void *arg)
5632 {
5633 	struct page *page = arg;
5634 
5635 	clear_user_highpage(page + idx, addr);
5636 }
5637 
5638 void clear_huge_page(struct page *page,
5639 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5640 {
5641 	unsigned long addr = addr_hint &
5642 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5643 
5644 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5645 		clear_gigantic_page(page, addr, pages_per_huge_page);
5646 		return;
5647 	}
5648 
5649 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5650 }
5651 
5652 static void copy_user_gigantic_page(struct page *dst, struct page *src,
5653 				    unsigned long addr,
5654 				    struct vm_area_struct *vma,
5655 				    unsigned int pages_per_huge_page)
5656 {
5657 	int i;
5658 	struct page *dst_base = dst;
5659 	struct page *src_base = src;
5660 
5661 	for (i = 0; i < pages_per_huge_page; ) {
5662 		cond_resched();
5663 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5664 
5665 		i++;
5666 		dst = mem_map_next(dst, dst_base, i);
5667 		src = mem_map_next(src, src_base, i);
5668 	}
5669 }
5670 
5671 struct copy_subpage_arg {
5672 	struct page *dst;
5673 	struct page *src;
5674 	struct vm_area_struct *vma;
5675 };
5676 
5677 static void copy_subpage(unsigned long addr, int idx, void *arg)
5678 {
5679 	struct copy_subpage_arg *copy_arg = arg;
5680 
5681 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5682 			   addr, copy_arg->vma);
5683 }
5684 
5685 void copy_user_huge_page(struct page *dst, struct page *src,
5686 			 unsigned long addr_hint, struct vm_area_struct *vma,
5687 			 unsigned int pages_per_huge_page)
5688 {
5689 	unsigned long addr = addr_hint &
5690 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5691 	struct copy_subpage_arg arg = {
5692 		.dst = dst,
5693 		.src = src,
5694 		.vma = vma,
5695 	};
5696 
5697 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5698 		copy_user_gigantic_page(dst, src, addr, vma,
5699 					pages_per_huge_page);
5700 		return;
5701 	}
5702 
5703 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5704 }
5705 
5706 long copy_huge_page_from_user(struct page *dst_page,
5707 				const void __user *usr_src,
5708 				unsigned int pages_per_huge_page,
5709 				bool allow_pagefault)
5710 {
5711 	void *page_kaddr;
5712 	unsigned long i, rc = 0;
5713 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5714 	struct page *subpage = dst_page;
5715 
5716 	for (i = 0; i < pages_per_huge_page;
5717 	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
5718 		if (allow_pagefault)
5719 			page_kaddr = kmap(subpage);
5720 		else
5721 			page_kaddr = kmap_atomic(subpage);
5722 		rc = copy_from_user(page_kaddr,
5723 				usr_src + i * PAGE_SIZE, PAGE_SIZE);
5724 		if (allow_pagefault)
5725 			kunmap(subpage);
5726 		else
5727 			kunmap_atomic(page_kaddr);
5728 
5729 		ret_val -= (PAGE_SIZE - rc);
5730 		if (rc)
5731 			break;
5732 
5733 		flush_dcache_page(subpage);
5734 
5735 		cond_resched();
5736 	}
5737 	return ret_val;
5738 }
5739 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5740 
5741 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5742 
5743 static struct kmem_cache *page_ptl_cachep;
5744 
5745 void __init ptlock_cache_init(void)
5746 {
5747 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5748 			SLAB_PANIC, NULL);
5749 }
5750 
5751 bool ptlock_alloc(struct page *page)
5752 {
5753 	spinlock_t *ptl;
5754 
5755 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5756 	if (!ptl)
5757 		return false;
5758 	page->ptl = ptl;
5759 	return true;
5760 }
5761 
5762 void ptlock_free(struct page *page)
5763 {
5764 	kmem_cache_free(page_ptl_cachep, page->ptl);
5765 }
5766 #endif
5767