xref: /openbmc/linux/mm/memory.c (revision cb325ddd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/coredump.h>
47 #include <linux/sched/numa_balancing.h>
48 #include <linux/sched/task.h>
49 #include <linux/hugetlb.h>
50 #include <linux/mman.h>
51 #include <linux/swap.h>
52 #include <linux/highmem.h>
53 #include <linux/pagemap.h>
54 #include <linux/memremap.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/pfn_t.h>
61 #include <linux/writeback.h>
62 #include <linux/memcontrol.h>
63 #include <linux/mmu_notifier.h>
64 #include <linux/swapops.h>
65 #include <linux/elf.h>
66 #include <linux/gfp.h>
67 #include <linux/migrate.h>
68 #include <linux/string.h>
69 #include <linux/debugfs.h>
70 #include <linux/userfaultfd_k.h>
71 #include <linux/dax.h>
72 #include <linux/oom.h>
73 #include <linux/numa.h>
74 #include <linux/perf_event.h>
75 #include <linux/ptrace.h>
76 #include <linux/vmalloc.h>
77 
78 #include <trace/events/kmem.h>
79 
80 #include <asm/io.h>
81 #include <asm/mmu_context.h>
82 #include <asm/pgalloc.h>
83 #include <linux/uaccess.h>
84 #include <asm/tlb.h>
85 #include <asm/tlbflush.h>
86 
87 #include "pgalloc-track.h"
88 #include "internal.h"
89 
90 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
91 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
92 #endif
93 
94 #ifndef CONFIG_NUMA
95 unsigned long max_mapnr;
96 EXPORT_SYMBOL(max_mapnr);
97 
98 struct page *mem_map;
99 EXPORT_SYMBOL(mem_map);
100 #endif
101 
102 /*
103  * A number of key systems in x86 including ioremap() rely on the assumption
104  * that high_memory defines the upper bound on direct map memory, the end
105  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
106  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
107  * and ZONE_HIGHMEM.
108  */
109 void *high_memory;
110 EXPORT_SYMBOL(high_memory);
111 
112 /*
113  * Randomize the address space (stacks, mmaps, brk, etc.).
114  *
115  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
116  *   as ancient (libc5 based) binaries can segfault. )
117  */
118 int randomize_va_space __read_mostly =
119 #ifdef CONFIG_COMPAT_BRK
120 					1;
121 #else
122 					2;
123 #endif
124 
125 #ifndef arch_faults_on_old_pte
126 static inline bool arch_faults_on_old_pte(void)
127 {
128 	/*
129 	 * Architectures that don't have a hardware access-flag feature need
130 	 * to implement their own helper. By default, "true" means a page
131 	 * fault will be taken on an old pte.
132 	 */
133 	return true;
134 }
135 #endif
136 
137 #ifndef arch_wants_old_prefaulted_pte
138 static inline bool arch_wants_old_prefaulted_pte(void)
139 {
140 	/*
141 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
142 	 * some architectures, even if it's performed in hardware. By
143 	 * default, "false" means prefaulted entries will be 'young'.
144 	 */
145 	return false;
146 }
147 #endif
148 
149 static int __init disable_randmaps(char *s)
150 {
151 	randomize_va_space = 0;
152 	return 1;
153 }
154 __setup("norandmaps", disable_randmaps);
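/*
 * Note: booting with "norandmaps" has essentially the same effect as
 * writing 0 to /proc/sys/kernel/randomize_va_space at run time; both
 * turn off the randomization selected above.
 */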
155 
156 unsigned long zero_pfn __read_mostly;
157 EXPORT_SYMBOL(zero_pfn);
158 
159 unsigned long highest_memmap_pfn __read_mostly;
160 
161 /*
162  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
163  */
164 static int __init init_zero_pfn(void)
165 {
166 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
167 	return 0;
168 }
169 early_initcall(init_zero_pfn);
170 
171 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
172 {
173 	trace_rss_stat(mm, member, count);
174 }
175 
176 #if defined(SPLIT_RSS_COUNTING)
177 
178 void sync_mm_rss(struct mm_struct *mm)
179 {
180 	int i;
181 
182 	for (i = 0; i < NR_MM_COUNTERS; i++) {
183 		if (current->rss_stat.count[i]) {
184 			add_mm_counter(mm, i, current->rss_stat.count[i]);
185 			current->rss_stat.count[i] = 0;
186 		}
187 	}
188 	current->rss_stat.events = 0;
189 }
190 
191 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
192 {
193 	struct task_struct *task = current;
194 
195 	if (likely(task->mm == mm))
196 		task->rss_stat.count[member] += val;
197 	else
198 		add_mm_counter(mm, member, val);
199 }
200 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
201 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
202 
203 /* sync counter once per 64 page faults */
204 #define TASK_RSS_EVENTS_THRESH	(64)
205 static void check_sync_rss_stat(struct task_struct *task)
206 {
207 	if (unlikely(task != current))
208 		return;
209 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
210 		sync_mm_rss(task->mm);
211 }
212 #else /* SPLIT_RSS_COUNTING */
213 
214 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
215 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
216 
217 static void check_sync_rss_stat(struct task_struct *task)
218 {
219 }
220 
221 #endif /* SPLIT_RSS_COUNTING */
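/*
 * In other words: with SPLIT_RSS_COUNTING each task batches RSS deltas in
 * current->rss_stat and folds them into the mm counters via sync_mm_rss(),
 * which check_sync_rss_stat() triggers roughly once every
 * TASK_RSS_EVENTS_THRESH page faults; without it the *_mm_counter_fast()
 * helpers update the shared mm counters directly.
 */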
222 
223 /*
224  * Note: this doesn't free the actual pages themselves. That
225  * has been handled earlier when unmapping all the memory regions.
226  */
227 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
228 			   unsigned long addr)
229 {
230 	pgtable_t token = pmd_pgtable(*pmd);
231 	pmd_clear(pmd);
232 	pte_free_tlb(tlb, token, addr);
233 	mm_dec_nr_ptes(tlb->mm);
234 }
235 
236 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
237 				unsigned long addr, unsigned long end,
238 				unsigned long floor, unsigned long ceiling)
239 {
240 	pmd_t *pmd;
241 	unsigned long next;
242 	unsigned long start;
243 
244 	start = addr;
245 	pmd = pmd_offset(pud, addr);
246 	do {
247 		next = pmd_addr_end(addr, end);
248 		if (pmd_none_or_clear_bad(pmd))
249 			continue;
250 		free_pte_range(tlb, pmd, addr);
251 	} while (pmd++, addr = next, addr != end);
252 
253 	start &= PUD_MASK;
254 	if (start < floor)
255 		return;
256 	if (ceiling) {
257 		ceiling &= PUD_MASK;
258 		if (!ceiling)
259 			return;
260 	}
261 	if (end - 1 > ceiling - 1)
262 		return;
263 
264 	pmd = pmd_offset(pud, start);
265 	pud_clear(pud);
266 	pmd_free_tlb(tlb, pmd, start);
267 	mm_dec_nr_pmds(tlb->mm);
268 }
269 
270 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
271 				unsigned long addr, unsigned long end,
272 				unsigned long floor, unsigned long ceiling)
273 {
274 	pud_t *pud;
275 	unsigned long next;
276 	unsigned long start;
277 
278 	start = addr;
279 	pud = pud_offset(p4d, addr);
280 	do {
281 		next = pud_addr_end(addr, end);
282 		if (pud_none_or_clear_bad(pud))
283 			continue;
284 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
285 	} while (pud++, addr = next, addr != end);
286 
287 	start &= P4D_MASK;
288 	if (start < floor)
289 		return;
290 	if (ceiling) {
291 		ceiling &= P4D_MASK;
292 		if (!ceiling)
293 			return;
294 	}
295 	if (end - 1 > ceiling - 1)
296 		return;
297 
298 	pud = pud_offset(p4d, start);
299 	p4d_clear(p4d);
300 	pud_free_tlb(tlb, pud, start);
301 	mm_dec_nr_puds(tlb->mm);
302 }
303 
304 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
305 				unsigned long addr, unsigned long end,
306 				unsigned long floor, unsigned long ceiling)
307 {
308 	p4d_t *p4d;
309 	unsigned long next;
310 	unsigned long start;
311 
312 	start = addr;
313 	p4d = p4d_offset(pgd, addr);
314 	do {
315 		next = p4d_addr_end(addr, end);
316 		if (p4d_none_or_clear_bad(p4d))
317 			continue;
318 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
319 	} while (p4d++, addr = next, addr != end);
320 
321 	start &= PGDIR_MASK;
322 	if (start < floor)
323 		return;
324 	if (ceiling) {
325 		ceiling &= PGDIR_MASK;
326 		if (!ceiling)
327 			return;
328 	}
329 	if (end - 1 > ceiling - 1)
330 		return;
331 
332 	p4d = p4d_offset(pgd, start);
333 	pgd_clear(pgd);
334 	p4d_free_tlb(tlb, p4d, start);
335 }
336 
337 /*
338  * This function frees user-level page tables of a process.
339  */
340 void free_pgd_range(struct mmu_gather *tlb,
341 			unsigned long addr, unsigned long end,
342 			unsigned long floor, unsigned long ceiling)
343 {
344 	pgd_t *pgd;
345 	unsigned long next;
346 
347 	/*
348 	 * The next few lines have given us lots of grief...
349 	 *
350 	 * Why are we testing PMD* at this top level?  Because often
351 	 * there will be no work to do at all, and we'd prefer not to
352 	 * go all the way down to the bottom just to discover that.
353 	 *
354 	 * Why all these "- 1"s?  Because 0 represents both the bottom
355 	 * of the address space and the top of it (using -1 for the
356 	 * top wouldn't help much: the masks would do the wrong thing).
357 	 * The rule is that addr 0 and floor 0 refer to the bottom of
358 	 * the address space, but end 0 and ceiling 0 refer to the top.
359 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
360 	 * that end 0 case should be mythical).
361 	 *
362 	 * Wherever addr is brought up or ceiling brought down, we must
363 	 * be careful to reject "the opposite 0" before it confuses the
364 	 * subsequent tests.  But what about where end is brought down
365 	 * by PMD_SIZE below? No, end can't go down to 0 there.
366 	 *
367 	 * Whereas we round start (addr) and ceiling down, by different
368 	 * masks at different levels, in order to test whether a table
369 	 * now has no other vmas using it, so can be freed, we don't
370 	 * bother to round floor or end up - the tests don't need that.
371 	 */
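	/*
	 * Worked example of the "- 1" comparisons: a ceiling of 0 means
	 * "top of the address space", so ceiling - 1 wraps to the highest
	 * possible address and "end - 1 > ceiling - 1" can never fire;
	 * any non-zero ceiling, once masked, behaves as an ordinary
	 * exclusive upper bound.
	 */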
372 
373 	addr &= PMD_MASK;
374 	if (addr < floor) {
375 		addr += PMD_SIZE;
376 		if (!addr)
377 			return;
378 	}
379 	if (ceiling) {
380 		ceiling &= PMD_MASK;
381 		if (!ceiling)
382 			return;
383 	}
384 	if (end - 1 > ceiling - 1)
385 		end -= PMD_SIZE;
386 	if (addr > end - 1)
387 		return;
388 	/*
389 	 * We add page table cache pages with PAGE_SIZE
390 	 * (see pte_free_tlb()); flush the TLB if we need to.
391 	 */
392 	tlb_change_page_size(tlb, PAGE_SIZE);
393 	pgd = pgd_offset(tlb->mm, addr);
394 	do {
395 		next = pgd_addr_end(addr, end);
396 		if (pgd_none_or_clear_bad(pgd))
397 			continue;
398 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
399 	} while (pgd++, addr = next, addr != end);
400 }
401 
402 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
403 		unsigned long floor, unsigned long ceiling)
404 {
405 	while (vma) {
406 		struct vm_area_struct *next = vma->vm_next;
407 		unsigned long addr = vma->vm_start;
408 
409 		/*
410 		 * Hide vma from rmap and truncate_pagecache before freeing
411 		 * pgtables
412 		 */
413 		unlink_anon_vmas(vma);
414 		unlink_file_vma(vma);
415 
416 		if (is_vm_hugetlb_page(vma)) {
417 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
418 				floor, next ? next->vm_start : ceiling);
419 		} else {
420 			/*
421 			 * Optimization: gather nearby vmas into one call down
422 			 */
423 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
424 			       && !is_vm_hugetlb_page(next)) {
425 				vma = next;
426 				next = vma->vm_next;
427 				unlink_anon_vmas(vma);
428 				unlink_file_vma(vma);
429 			}
430 			free_pgd_range(tlb, addr, vma->vm_end,
431 				floor, next ? next->vm_start : ceiling);
432 		}
433 		vma = next;
434 	}
435 }
436 
437 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
438 {
439 	spinlock_t *ptl = pmd_lock(mm, pmd);
440 
441 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
442 		mm_inc_nr_ptes(mm);
443 		/*
444 		 * Ensure all pte setup (e.g. pte page lock and page clearing) is
445 		 * visible before the pte is made visible to other CPUs by being
446 		 * put into page tables.
447 		 *
448 		 * The other side of the story is the pointer chasing in the page
449 		 * table walking code (when walking the page table without locking;
450 		 * ie. most of the time). Fortunately, these data accesses consist
451 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
452 		 * being the notable exception) will already guarantee loads are
453 		 * seen in-order. See the alpha page table accessors for the
454 		 * smp_rmb() barriers in page table walking code.
455 		 */
456 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
457 		pmd_populate(mm, pmd, *pte);
458 		*pte = NULL;
459 	}
460 	spin_unlock(ptl);
461 }
462 
463 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
464 {
465 	pgtable_t new = pte_alloc_one(mm);
466 	if (!new)
467 		return -ENOMEM;
468 
469 	pmd_install(mm, pmd, &new);
470 	if (new)
471 		pte_free(mm, new);
472 	return 0;
473 }
474 
475 int __pte_alloc_kernel(pmd_t *pmd)
476 {
477 	pte_t *new = pte_alloc_one_kernel(&init_mm);
478 	if (!new)
479 		return -ENOMEM;
480 
481 	spin_lock(&init_mm.page_table_lock);
482 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
483 		smp_wmb(); /* See comment in pmd_install() */
484 		pmd_populate_kernel(&init_mm, pmd, new);
485 		new = NULL;
486 	}
487 	spin_unlock(&init_mm.page_table_lock);
488 	if (new)
489 		pte_free_kernel(&init_mm, new);
490 	return 0;
491 }
492 
493 static inline void init_rss_vec(int *rss)
494 {
495 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
496 }
497 
498 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
499 {
500 	int i;
501 
502 	if (current->mm == mm)
503 		sync_mm_rss(mm);
504 	for (i = 0; i < NR_MM_COUNTERS; i++)
505 		if (rss[i])
506 			add_mm_counter(mm, i, rss[i]);
507 }
508 
509 /*
510  * This function is called to print an error when a bad pte
511  * is found. For example, we might have a PFN-mapped pte in
512  * a region that doesn't allow it.
513  *
514  * The calling function must still handle the error.
515  */
516 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
517 			  pte_t pte, struct page *page)
518 {
519 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
520 	p4d_t *p4d = p4d_offset(pgd, addr);
521 	pud_t *pud = pud_offset(p4d, addr);
522 	pmd_t *pmd = pmd_offset(pud, addr);
523 	struct address_space *mapping;
524 	pgoff_t index;
525 	static unsigned long resume;
526 	static unsigned long nr_shown;
527 	static unsigned long nr_unshown;
528 
529 	/*
530 	 * Allow a burst of 60 reports, then keep quiet for that minute;
531 	 * or allow a steady drip of one report per second.
532 	 */
533 	if (nr_shown == 60) {
534 		if (time_before(jiffies, resume)) {
535 			nr_unshown++;
536 			return;
537 		}
538 		if (nr_unshown) {
539 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
540 				 nr_unshown);
541 			nr_unshown = 0;
542 		}
543 		nr_shown = 0;
544 	}
545 	if (nr_shown++ == 0)
546 		resume = jiffies + 60 * HZ;
547 
548 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
549 	index = linear_page_index(vma, addr);
550 
551 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
552 		 current->comm,
553 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
554 	if (page)
555 		dump_page(page, "bad pte");
556 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
557 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
558 	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
559 		 vma->vm_file,
560 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
561 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
562 		 mapping ? mapping->a_ops->readpage : NULL);
563 	dump_stack();
564 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
565 }
566 
567 /*
568  * vm_normal_page -- This function gets the "struct page" associated with a pte.
569  *
570  * "Special" mappings do not wish to be associated with a "struct page" (either
571  * it doesn't exist, or it exists but they don't want to touch it). In this
572  * case, NULL is returned here. "Normal" mappings do have a struct page.
573  *
574  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
575  * pte bit, in which case this function is trivial. Secondly, an architecture
576  * may not have a spare pte bit, which requires a more complicated scheme,
577  * described below.
578  *
579  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
580  * special mapping (even if there are underlying and valid "struct pages").
581  * COWed pages of a VM_PFNMAP are always normal.
582  *
583  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
584  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
585  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
586  * mapping will always honor the rule
587  *
588  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
589  *
590  * And for normal mappings this is false.
591  *
592  * This restricts such mappings to be a linear translation from virtual address
593  * to pfn. To get around this restriction, we allow arbitrary mappings so long
594  * as the vma is not a COW mapping; in that case, we know that all ptes are
595  * special (because none can have been COWed).
596  *
597  *
598  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
599  *
600  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
601  * page" backing; however, the difference is that _all_ pages with a struct
602  * page (that is, those where pfn_valid is true) are refcounted and considered
603  * normal pages by the VM. The disadvantage is that pages are refcounted
604  * (which can be slower and simply not an option for some PFNMAP users). The
605  * advantage is that we don't have to follow the strict linearity rule of
606  * PFNMAP mappings in order to support COWable mappings.
607  *
608  */
609 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
610 			    pte_t pte)
611 {
612 	unsigned long pfn = pte_pfn(pte);
613 
614 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
615 		if (likely(!pte_special(pte)))
616 			goto check_pfn;
617 		if (vma->vm_ops && vma->vm_ops->find_special_page)
618 			return vma->vm_ops->find_special_page(vma, addr);
619 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
620 			return NULL;
621 		if (is_zero_pfn(pfn))
622 			return NULL;
623 		if (pte_devmap(pte))
624 			return NULL;
625 
626 		print_bad_pte(vma, addr, pte, NULL);
627 		return NULL;
628 	}
629 
630 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
631 
632 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
633 		if (vma->vm_flags & VM_MIXEDMAP) {
634 			if (!pfn_valid(pfn))
635 				return NULL;
636 			goto out;
637 		} else {
638 			unsigned long off;
639 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
640 			if (pfn == vma->vm_pgoff + off)
641 				return NULL;
642 			if (!is_cow_mapping(vma->vm_flags))
643 				return NULL;
644 		}
645 	}
646 
647 	if (is_zero_pfn(pfn))
648 		return NULL;
649 
650 check_pfn:
651 	if (unlikely(pfn > highest_memmap_pfn)) {
652 		print_bad_pte(vma, addr, pte, NULL);
653 		return NULL;
654 	}
655 
656 	/*
657 	 * NOTE! We still have PageReserved() pages in the page tables.
658 	 * eg. VDSO mappings can cause them to exist.
659 	 */
660 out:
661 	return pfn_to_page(pfn);
662 }
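/*
 * Example of the remap_pfn_range() linearity rule above: for a COW-able
 * (MAP_PRIVATE) VM_PFNMAP vma with vm_start == 0x40000000 and
 * vm_pgoff == 0x100, the pte mapping address 0x40003000 is special when
 * its pfn equals vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT) == 0x103
 * (assuming 4K pages); a COWed replacement page necessarily has some
 * other pfn and is therefore reported as a normal page.
 */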
663 
664 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
665 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
666 				pmd_t pmd)
667 {
668 	unsigned long pfn = pmd_pfn(pmd);
669 
670 	/*
671 	 * There is no pmd_special() but there may be special pmds, e.g.
672 	 * in a direct-access (dax) mapping, so let's just replicate the
673 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
674 	 */
675 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
676 		if (vma->vm_flags & VM_MIXEDMAP) {
677 			if (!pfn_valid(pfn))
678 				return NULL;
679 			goto out;
680 		} else {
681 			unsigned long off;
682 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
683 			if (pfn == vma->vm_pgoff + off)
684 				return NULL;
685 			if (!is_cow_mapping(vma->vm_flags))
686 				return NULL;
687 		}
688 	}
689 
690 	if (pmd_devmap(pmd))
691 		return NULL;
692 	if (is_huge_zero_pmd(pmd))
693 		return NULL;
694 	if (unlikely(pfn > highest_memmap_pfn))
695 		return NULL;
696 
697 	/*
698 	 * NOTE! We still have PageReserved() pages in the page tables.
699 	 * eg. VDSO mappings can cause them to exist.
700 	 */
701 out:
702 	return pfn_to_page(pfn);
703 }
704 #endif
705 
706 static void restore_exclusive_pte(struct vm_area_struct *vma,
707 				  struct page *page, unsigned long address,
708 				  pte_t *ptep)
709 {
710 	pte_t pte;
711 	swp_entry_t entry;
712 
713 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
714 	if (pte_swp_soft_dirty(*ptep))
715 		pte = pte_mksoft_dirty(pte);
716 
717 	entry = pte_to_swp_entry(*ptep);
718 	if (pte_swp_uffd_wp(*ptep))
719 		pte = pte_mkuffd_wp(pte);
720 	else if (is_writable_device_exclusive_entry(entry))
721 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
722 
723 	/*
724 	 * No need to take a page reference as one was already
725 	 * created when the swap entry was made.
726 	 */
727 	if (PageAnon(page))
728 		page_add_anon_rmap(page, vma, address, false);
729 	else
730 		/*
731 		 * Currently device exclusive access only supports anonymous
732 		 * memory so the entry shouldn't point to a filebacked page.
733 		 */
734 		WARN_ON_ONCE(!PageAnon(page));
735 
736 	set_pte_at(vma->vm_mm, address, ptep, pte);
737 
738 	if (vma->vm_flags & VM_LOCKED)
739 		mlock_vma_page(page);
740 
741 	/*
742 	 * No need to invalidate - it was non-present before. However
743 	 * secondary CPUs may have mappings that need invalidating.
744 	 */
745 	update_mmu_cache(vma, address, ptep);
746 }
747 
748 /*
749  * Tries to restore an exclusive pte if the page lock can be acquired without
750  * sleeping.
751  */
752 static int
753 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
754 			unsigned long addr)
755 {
756 	swp_entry_t entry = pte_to_swp_entry(*src_pte);
757 	struct page *page = pfn_swap_entry_to_page(entry);
758 
759 	if (trylock_page(page)) {
760 		restore_exclusive_pte(vma, page, addr, src_pte);
761 		unlock_page(page);
762 		return 0;
763 	}
764 
765 	return -EBUSY;
766 }
767 
768 /*
769  * Copy one vm_area from one task to the other. Assumes that the page
770  * tables already present in the new task have been cleared in the whole
771  * range covered by this vma.
772  */
773 
774 static unsigned long
775 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
776 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
777 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
778 {
779 	unsigned long vm_flags = dst_vma->vm_flags;
780 	pte_t pte = *src_pte;
781 	struct page *page;
782 	swp_entry_t entry = pte_to_swp_entry(pte);
783 
784 	if (likely(!non_swap_entry(entry))) {
785 		if (swap_duplicate(entry) < 0)
786 			return -EIO;
787 
788 		/* make sure dst_mm is on swapoff's mmlist. */
789 		if (unlikely(list_empty(&dst_mm->mmlist))) {
790 			spin_lock(&mmlist_lock);
791 			if (list_empty(&dst_mm->mmlist))
792 				list_add(&dst_mm->mmlist,
793 						&src_mm->mmlist);
794 			spin_unlock(&mmlist_lock);
795 		}
796 		rss[MM_SWAPENTS]++;
797 	} else if (is_migration_entry(entry)) {
798 		page = pfn_swap_entry_to_page(entry);
799 
800 		rss[mm_counter(page)]++;
801 
802 		if (is_writable_migration_entry(entry) &&
803 				is_cow_mapping(vm_flags)) {
804 			/*
805 			 * COW mappings require pages in both
806 			 * parent and child to be set to read-only.
807 			 */
808 			entry = make_readable_migration_entry(
809 							swp_offset(entry));
810 			pte = swp_entry_to_pte(entry);
811 			if (pte_swp_soft_dirty(*src_pte))
812 				pte = pte_swp_mksoft_dirty(pte);
813 			if (pte_swp_uffd_wp(*src_pte))
814 				pte = pte_swp_mkuffd_wp(pte);
815 			set_pte_at(src_mm, addr, src_pte, pte);
816 		}
817 	} else if (is_device_private_entry(entry)) {
818 		page = pfn_swap_entry_to_page(entry);
819 
820 		/*
821 		 * Update rss count even for unaddressable pages, as
822 		 * they should be treated just like normal pages in this
823 		 * respect.
824 		 *
825 		 * We will likely want to have some new rss counters
826 		 * for unaddressable pages, at some point. But for now
827 		 * keep things as they are.
828 		 */
829 		get_page(page);
830 		rss[mm_counter(page)]++;
831 		page_dup_rmap(page, false);
832 
833 		/*
834 		 * We do not preserve soft-dirty information, because so
835 		 * far, checkpoint/restore is the only feature that
836 		 * requires that. And checkpoint/restore does not work
837 		 * when a device driver is involved (you cannot easily
838 		 * save and restore device driver state).
839 		 */
840 		if (is_writable_device_private_entry(entry) &&
841 		    is_cow_mapping(vm_flags)) {
842 			entry = make_readable_device_private_entry(
843 							swp_offset(entry));
844 			pte = swp_entry_to_pte(entry);
845 			if (pte_swp_uffd_wp(*src_pte))
846 				pte = pte_swp_mkuffd_wp(pte);
847 			set_pte_at(src_mm, addr, src_pte, pte);
848 		}
849 	} else if (is_device_exclusive_entry(entry)) {
850 		/*
851 		 * Make device exclusive entries present by restoring the
852 		 * original entry then copying as for a present pte. Device
853 		 * exclusive entries currently only support private writable
854 		 * (ie. COW) mappings.
855 		 */
856 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
857 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
858 			return -EBUSY;
859 		return -ENOENT;
860 	}
861 	if (!userfaultfd_wp(dst_vma))
862 		pte = pte_swp_clear_uffd_wp(pte);
863 	set_pte_at(dst_mm, addr, dst_pte, pte);
864 	return 0;
865 }
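/*
 * Return values of copy_nonpresent_pte(), as consumed by copy_pte_range():
 * 0 means the entry was copied; -EIO means swap_duplicate() wants a swap
 * count continuation allocated outside the page table locks; -EBUSY means
 * a device-exclusive page could not be locked without sleeping; -ENOENT
 * means a device-exclusive entry was restored to a present pte, which the
 * caller then copies with copy_present_pte().
 */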
866 
867 /*
868  * Copy a present and normal page if necessary.
869  *
870  * NOTE! The usual case is that this doesn't need to do
871  * anything, and can just return a positive value. That
872  * will let the caller know that it can just increase
873  * the page refcount and re-use the pte the traditional
874  * way.
875  *
876  * But _if_ we need to copy it because it needs to be
877  * pinned in the parent (and the child should get its own
878  * copy rather than just a reference to the same page),
879  * we'll do that here and return zero to let the caller
880  * know we're done.
881  *
882  * And if we need a pre-allocated page but don't yet have
883  * one, return a negative error to let the preallocation
884  * code know so that it can do so outside the page table
885  * lock.
886  */
887 static inline int
888 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
889 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
890 		  struct page **prealloc, pte_t pte, struct page *page)
891 {
892 	struct page *new_page;
893 
894 	/*
895 	 * What we want to do is to check whether this page may
896 	 * have been pinned by the parent process.  If so,
897 	 * instead of wrprotect the pte on both sides, we copy
898 	 * the page immediately so that we'll always guarantee
899 	 * the pinned page won't be randomly replaced in the
900 	 * future.
901 	 *
902 	 * The page pinning checks are just "has this mm ever
903 	 * seen pinning", along with the (inexact) check of
904 	 * the page count. That might give false positives
905 	 * for pinning, but it will work correctly.
906 	 */
907 	if (likely(!page_needs_cow_for_dma(src_vma, page)))
908 		return 1;
909 
910 	new_page = *prealloc;
911 	if (!new_page)
912 		return -EAGAIN;
913 
914 	/*
915 	 * We have a prealloc page, all good!  Take it
916 	 * over and copy the page & arm it.
917 	 */
918 	*prealloc = NULL;
919 	copy_user_highpage(new_page, page, addr, src_vma);
920 	__SetPageUptodate(new_page);
921 	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
922 	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
923 	rss[mm_counter(new_page)]++;
924 
925 	/* All done, just insert the new page copy in the child */
926 	pte = mk_pte(new_page, dst_vma->vm_page_prot);
927 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
928 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
929 		/* Uffd-wp needs to be delivered to dest pte as well */
930 		pte = pte_wrprotect(pte_mkuffd_wp(pte));
931 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
932 	return 0;
933 }
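/*
 * Note: when page_needs_cow_for_dma() reports that the parent may have
 * pinned the page (e.g. with pin_user_pages()), the child gets the fresh
 * copy mapped above while the parent keeps the original page, so DMA that
 * is still targeting the pinned page remains coherent with the parent's
 * view of memory.
 */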
934 
935 /*
936  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
937  * is required to copy this pte.
938  */
939 static inline int
940 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
941 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
942 		 struct page **prealloc)
943 {
944 	struct mm_struct *src_mm = src_vma->vm_mm;
945 	unsigned long vm_flags = src_vma->vm_flags;
946 	pte_t pte = *src_pte;
947 	struct page *page;
948 
949 	page = vm_normal_page(src_vma, addr, pte);
950 	if (page) {
951 		int retval;
952 
953 		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
954 					   addr, rss, prealloc, pte, page);
955 		if (retval <= 0)
956 			return retval;
957 
958 		get_page(page);
959 		page_dup_rmap(page, false);
960 		rss[mm_counter(page)]++;
961 	}
962 
963 	/*
964 	 * If it's a COW mapping, write protect it both
965 	 * in the parent and the child
966 	 */
967 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
968 		ptep_set_wrprotect(src_mm, addr, src_pte);
969 		pte = pte_wrprotect(pte);
970 	}
971 
972 	/*
973 	 * If it's a shared mapping, mark it clean in
974 	 * the child
975 	 */
976 	if (vm_flags & VM_SHARED)
977 		pte = pte_mkclean(pte);
978 	pte = pte_mkold(pte);
979 
980 	if (!userfaultfd_wp(dst_vma))
981 		pte = pte_clear_uffd_wp(pte);
982 
983 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
984 	return 0;
985 }
986 
987 static inline struct page *
988 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
989 		   unsigned long addr)
990 {
991 	struct page *new_page;
992 
993 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
994 	if (!new_page)
995 		return NULL;
996 
997 	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
998 		put_page(new_page);
999 		return NULL;
1000 	}
1001 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
1002 
1003 	return new_page;
1004 }
1005 
1006 static int
1007 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1008 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1009 	       unsigned long end)
1010 {
1011 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1012 	struct mm_struct *src_mm = src_vma->vm_mm;
1013 	pte_t *orig_src_pte, *orig_dst_pte;
1014 	pte_t *src_pte, *dst_pte;
1015 	spinlock_t *src_ptl, *dst_ptl;
1016 	int progress, ret = 0;
1017 	int rss[NR_MM_COUNTERS];
1018 	swp_entry_t entry = (swp_entry_t){0};
1019 	struct page *prealloc = NULL;
1020 
1021 again:
1022 	progress = 0;
1023 	init_rss_vec(rss);
1024 
1025 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1026 	if (!dst_pte) {
1027 		ret = -ENOMEM;
1028 		goto out;
1029 	}
1030 	src_pte = pte_offset_map(src_pmd, addr);
1031 	src_ptl = pte_lockptr(src_mm, src_pmd);
1032 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1033 	orig_src_pte = src_pte;
1034 	orig_dst_pte = dst_pte;
1035 	arch_enter_lazy_mmu_mode();
1036 
1037 	do {
1038 		/*
1039 		 * We are holding two locks at this point - either of them
1040 		 * could generate latencies in another task on another CPU.
1041 		 */
1042 		if (progress >= 32) {
1043 			progress = 0;
1044 			if (need_resched() ||
1045 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1046 				break;
1047 		}
1048 		if (pte_none(*src_pte)) {
1049 			progress++;
1050 			continue;
1051 		}
1052 		if (unlikely(!pte_present(*src_pte))) {
1053 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1054 						  dst_pte, src_pte,
1055 						  dst_vma, src_vma,
1056 						  addr, rss);
1057 			if (ret == -EIO) {
1058 				entry = pte_to_swp_entry(*src_pte);
1059 				break;
1060 			} else if (ret == -EBUSY) {
1061 				break;
1062 			} else if (!ret) {
1063 				progress += 8;
1064 				continue;
1065 			}
1066 
1067 			/*
1068 			 * Device exclusive entry restored, continue by copying
1069 			 * the now present pte.
1070 			 */
1071 			WARN_ON_ONCE(ret != -ENOENT);
1072 		}
1073 		/* copy_present_pte() will clear `*prealloc' if consumed */
1074 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1075 				       addr, rss, &prealloc);
1076 		/*
1077 		 * If we need a pre-allocated page for this pte, drop the
1078 		 * locks, allocate, and try again.
1079 		 */
1080 		if (unlikely(ret == -EAGAIN))
1081 			break;
1082 		if (unlikely(prealloc)) {
1083 			/*
1084 			 * The preallocated page cannot be carried over to the
1085 			 * next iteration: mempolicy must be followed strictly
1086 			 * (alloc_page_vma() allocates according to the address).
1087 			 * This can only happen if a pinned pte changed.
1088 			 */
1089 			put_page(prealloc);
1090 			prealloc = NULL;
1091 		}
1092 		progress += 8;
1093 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1094 
1095 	arch_leave_lazy_mmu_mode();
1096 	spin_unlock(src_ptl);
1097 	pte_unmap(orig_src_pte);
1098 	add_mm_rss_vec(dst_mm, rss);
1099 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1100 	cond_resched();
1101 
1102 	if (ret == -EIO) {
1103 		VM_WARN_ON_ONCE(!entry.val);
1104 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1105 			ret = -ENOMEM;
1106 			goto out;
1107 		}
1108 		entry.val = 0;
1109 	} else if (ret == -EBUSY) {
1110 		goto out;
1111 	} else if (ret ==  -EAGAIN) {
1112 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1113 		if (!prealloc)
1114 			return -ENOMEM;
1115 	} else if (ret) {
1116 		VM_WARN_ON_ONCE(1);
1117 	}
1118 
1119 	/* We've captured and resolved the error. Reset, try again. */
1120 	ret = 0;
1121 
1122 	if (addr != end)
1123 		goto again;
1124 out:
1125 	if (unlikely(prealloc))
1126 		put_page(prealloc);
1127 	return ret;
1128 }
1129 
1130 static inline int
1131 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1132 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1133 	       unsigned long end)
1134 {
1135 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1136 	struct mm_struct *src_mm = src_vma->vm_mm;
1137 	pmd_t *src_pmd, *dst_pmd;
1138 	unsigned long next;
1139 
1140 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1141 	if (!dst_pmd)
1142 		return -ENOMEM;
1143 	src_pmd = pmd_offset(src_pud, addr);
1144 	do {
1145 		next = pmd_addr_end(addr, end);
1146 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1147 			|| pmd_devmap(*src_pmd)) {
1148 			int err;
1149 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1150 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1151 					    addr, dst_vma, src_vma);
1152 			if (err == -ENOMEM)
1153 				return -ENOMEM;
1154 			if (!err)
1155 				continue;
1156 			/* fall through */
1157 		}
1158 		if (pmd_none_or_clear_bad(src_pmd))
1159 			continue;
1160 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1161 				   addr, next))
1162 			return -ENOMEM;
1163 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1164 	return 0;
1165 }
1166 
1167 static inline int
1168 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1169 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1170 	       unsigned long end)
1171 {
1172 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1173 	struct mm_struct *src_mm = src_vma->vm_mm;
1174 	pud_t *src_pud, *dst_pud;
1175 	unsigned long next;
1176 
1177 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1178 	if (!dst_pud)
1179 		return -ENOMEM;
1180 	src_pud = pud_offset(src_p4d, addr);
1181 	do {
1182 		next = pud_addr_end(addr, end);
1183 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1184 			int err;
1185 
1186 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1187 			err = copy_huge_pud(dst_mm, src_mm,
1188 					    dst_pud, src_pud, addr, src_vma);
1189 			if (err == -ENOMEM)
1190 				return -ENOMEM;
1191 			if (!err)
1192 				continue;
1193 			/* fall through */
1194 		}
1195 		if (pud_none_or_clear_bad(src_pud))
1196 			continue;
1197 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1198 				   addr, next))
1199 			return -ENOMEM;
1200 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1201 	return 0;
1202 }
1203 
1204 static inline int
1205 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1206 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1207 	       unsigned long end)
1208 {
1209 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1210 	p4d_t *src_p4d, *dst_p4d;
1211 	unsigned long next;
1212 
1213 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1214 	if (!dst_p4d)
1215 		return -ENOMEM;
1216 	src_p4d = p4d_offset(src_pgd, addr);
1217 	do {
1218 		next = p4d_addr_end(addr, end);
1219 		if (p4d_none_or_clear_bad(src_p4d))
1220 			continue;
1221 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1222 				   addr, next))
1223 			return -ENOMEM;
1224 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1225 	return 0;
1226 }
1227 
1228 int
1229 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1230 {
1231 	pgd_t *src_pgd, *dst_pgd;
1232 	unsigned long next;
1233 	unsigned long addr = src_vma->vm_start;
1234 	unsigned long end = src_vma->vm_end;
1235 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1236 	struct mm_struct *src_mm = src_vma->vm_mm;
1237 	struct mmu_notifier_range range;
1238 	bool is_cow;
1239 	int ret;
1240 
1241 	/*
1242 	 * Don't copy ptes where a page fault will fill them correctly.
1243 	 * Fork becomes much lighter when there are big shared or private
1244 	 * readonly mappings. The tradeoff is that copy_page_range is more
1245 	 * efficient than faulting.
1246 	 */
1247 	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1248 	    !src_vma->anon_vma)
1249 		return 0;
1250 
1251 	if (is_vm_hugetlb_page(src_vma))
1252 		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
1253 
1254 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1255 		/*
1256 		 * We do not free on error cases below as remove_vma
1257 		 * gets called on error from higher level routine
1258 		 */
1259 		ret = track_pfn_copy(src_vma);
1260 		if (ret)
1261 			return ret;
1262 	}
1263 
1264 	/*
1265 	 * We need to invalidate the secondary MMU mappings only when
1266 	 * there could be a permission downgrade on the ptes of the
1267 	 * parent mm. And a permission downgrade will only happen if
1268 	 * is_cow_mapping() returns true.
1269 	 */
1270 	is_cow = is_cow_mapping(src_vma->vm_flags);
1271 
1272 	if (is_cow) {
1273 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1274 					0, src_vma, src_mm, addr, end);
1275 		mmu_notifier_invalidate_range_start(&range);
1276 		/*
1277 		 * Disabling preemption is not needed for the write side, as
1278 		 * the read side doesn't spin, but goes to the mmap_lock.
1279 		 *
1280 		 * Use the raw variant of the seqcount_t write API to avoid
1281 		 * lockdep complaining about preemptibility.
1282 		 */
1283 		mmap_assert_write_locked(src_mm);
1284 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1285 	}
1286 
1287 	ret = 0;
1288 	dst_pgd = pgd_offset(dst_mm, addr);
1289 	src_pgd = pgd_offset(src_mm, addr);
1290 	do {
1291 		next = pgd_addr_end(addr, end);
1292 		if (pgd_none_or_clear_bad(src_pgd))
1293 			continue;
1294 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1295 					    addr, next))) {
1296 			ret = -ENOMEM;
1297 			break;
1298 		}
1299 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1300 
1301 	if (is_cow) {
1302 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1303 		mmu_notifier_invalidate_range_end(&range);
1304 	}
1305 	return ret;
1306 }
1307 
1308 /*
1309  * Parameter block passed down to zap_pte_range in exceptional cases.
1310  */
1311 struct zap_details {
1312 	struct folio *single_folio;	/* Locked folio to be unmapped */
1313 	bool even_cows;			/* Zap COWed private pages too? */
1314 };
1315 
1316 /* Whether we should zap all COWed (private) pages too */
1317 static inline bool should_zap_cows(struct zap_details *details)
1318 {
1319 	/* By default, zap all pages */
1320 	if (!details)
1321 		return true;
1322 
1323 	/* Or, we zap COWed pages only if the caller wants to */
1324 	return details->even_cows;
1325 }
1326 
1327 /* Decides whether we should zap this page with the page pointer specified */
1328 static inline bool should_zap_page(struct zap_details *details, struct page *page)
1329 {
1330 	/* If we can make a decision without *page.. */
1331 	if (should_zap_cows(details))
1332 		return true;
1333 
1334 	/* E.g. the caller passes NULL for the case of a zero page */
1335 	if (!page)
1336 		return true;
1337 
1338 	/* Otherwise we should only zap non-anon pages */
1339 	return !PageAnon(page);
1340 }
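/*
 * Put differently: details == NULL (or details->even_cows set) zaps every
 * page in the range, while a caller that only wants file pages gone, e.g.
 * page cache invalidation, leaves even_cows clear and anonymous COW copies
 * are then skipped by should_zap_page().
 */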
1341 
1342 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1343 				struct vm_area_struct *vma, pmd_t *pmd,
1344 				unsigned long addr, unsigned long end,
1345 				struct zap_details *details)
1346 {
1347 	struct mm_struct *mm = tlb->mm;
1348 	int force_flush = 0;
1349 	int rss[NR_MM_COUNTERS];
1350 	spinlock_t *ptl;
1351 	pte_t *start_pte;
1352 	pte_t *pte;
1353 	swp_entry_t entry;
1354 
1355 	tlb_change_page_size(tlb, PAGE_SIZE);
1356 again:
1357 	init_rss_vec(rss);
1358 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1359 	pte = start_pte;
1360 	flush_tlb_batched_pending(mm);
1361 	arch_enter_lazy_mmu_mode();
1362 	do {
1363 		pte_t ptent = *pte;
1364 		struct page *page;
1365 
1366 		if (pte_none(ptent))
1367 			continue;
1368 
1369 		if (need_resched())
1370 			break;
1371 
1372 		if (pte_present(ptent)) {
1373 			page = vm_normal_page(vma, addr, ptent);
1374 			if (unlikely(!should_zap_page(details, page)))
1375 				continue;
1376 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1377 							tlb->fullmm);
1378 			tlb_remove_tlb_entry(tlb, pte, addr);
1379 			if (unlikely(!page))
1380 				continue;
1381 
1382 			if (!PageAnon(page)) {
1383 				if (pte_dirty(ptent)) {
1384 					force_flush = 1;
1385 					set_page_dirty(page);
1386 				}
1387 				if (pte_young(ptent) &&
1388 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1389 					mark_page_accessed(page);
1390 			}
1391 			rss[mm_counter(page)]--;
1392 			page_remove_rmap(page, false);
1393 			if (unlikely(page_mapcount(page) < 0))
1394 				print_bad_pte(vma, addr, ptent, page);
1395 			if (unlikely(__tlb_remove_page(tlb, page))) {
1396 				force_flush = 1;
1397 				addr += PAGE_SIZE;
1398 				break;
1399 			}
1400 			continue;
1401 		}
1402 
1403 		entry = pte_to_swp_entry(ptent);
1404 		if (is_device_private_entry(entry) ||
1405 		    is_device_exclusive_entry(entry)) {
1406 			page = pfn_swap_entry_to_page(entry);
1407 			if (unlikely(!should_zap_page(details, page)))
1408 				continue;
1409 			rss[mm_counter(page)]--;
1410 			if (is_device_private_entry(entry))
1411 				page_remove_rmap(page, false);
1412 			put_page(page);
1413 		} else if (!non_swap_entry(entry)) {
1414 			/* Genuine swap entry, hence a private anon page */
1415 			if (!should_zap_cows(details))
1416 				continue;
1417 			rss[MM_SWAPENTS]--;
1418 			if (unlikely(!free_swap_and_cache(entry)))
1419 				print_bad_pte(vma, addr, ptent, NULL);
1420 		} else if (is_migration_entry(entry)) {
1421 			page = pfn_swap_entry_to_page(entry);
1422 			if (!should_zap_page(details, page))
1423 				continue;
1424 			rss[mm_counter(page)]--;
1425 		} else if (is_hwpoison_entry(entry)) {
1426 			if (!should_zap_cows(details))
1427 				continue;
1428 		} else {
1429 			/* We should have covered all the swap entry types */
1430 			WARN_ON_ONCE(1);
1431 		}
1432 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1433 	} while (pte++, addr += PAGE_SIZE, addr != end);
1434 
1435 	add_mm_rss_vec(mm, rss);
1436 	arch_leave_lazy_mmu_mode();
1437 
1438 	/* Do the actual TLB flush before dropping ptl */
1439 	if (force_flush)
1440 		tlb_flush_mmu_tlbonly(tlb);
1441 	pte_unmap_unlock(start_pte, ptl);
1442 
1443 	/*
1444 	 * If we forced a TLB flush (either due to running out of
1445 	 * batch buffers or because we needed to flush dirty TLB
1446 	 * entries before releasing the ptl), free the batched
1447 	 * memory too. Restart if we didn't do everything.
1448 	 */
1449 	if (force_flush) {
1450 		force_flush = 0;
1451 		tlb_flush_mmu(tlb);
1452 	}
1453 
1454 	if (addr != end) {
1455 		cond_resched();
1456 		goto again;
1457 	}
1458 
1459 	return addr;
1460 }
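/*
 * force_flush above is set either because a dirty pte of a file page was
 * cleared (the stale TLB entry must go before the page can be written
 * back) or because the mmu_gather batch filled up (__tlb_remove_page()
 * returned true); in both cases the TLB is flushed before the ptl is
 * dropped and the loop restarts from the recorded address.
 */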
1461 
1462 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1463 				struct vm_area_struct *vma, pud_t *pud,
1464 				unsigned long addr, unsigned long end,
1465 				struct zap_details *details)
1466 {
1467 	pmd_t *pmd;
1468 	unsigned long next;
1469 
1470 	pmd = pmd_offset(pud, addr);
1471 	do {
1472 		next = pmd_addr_end(addr, end);
1473 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1474 			if (next - addr != HPAGE_PMD_SIZE)
1475 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1476 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1477 				goto next;
1478 			/* fall through */
1479 		} else if (details && details->single_folio &&
1480 			   folio_test_pmd_mappable(details->single_folio) &&
1481 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1482 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1483 			/*
1484 			 * Take and drop THP pmd lock so that we cannot return
1485 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1486 			 * but not yet decremented compound_mapcount().
1487 			 */
1488 			spin_unlock(ptl);
1489 		}
1490 
1491 		/*
1492 		 * Here there can be other concurrent MADV_DONTNEED or
1493 		 * trans huge page faults running, and if the pmd is
1494 		 * none or trans huge it can change under us. This is
1495 		 * because MADV_DONTNEED holds the mmap_lock in read
1496 		 * mode.
1497 		 */
1498 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1499 			goto next;
1500 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1501 next:
1502 		cond_resched();
1503 	} while (pmd++, addr = next, addr != end);
1504 
1505 	return addr;
1506 }
1507 
1508 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1509 				struct vm_area_struct *vma, p4d_t *p4d,
1510 				unsigned long addr, unsigned long end,
1511 				struct zap_details *details)
1512 {
1513 	pud_t *pud;
1514 	unsigned long next;
1515 
1516 	pud = pud_offset(p4d, addr);
1517 	do {
1518 		next = pud_addr_end(addr, end);
1519 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1520 			if (next - addr != HPAGE_PUD_SIZE) {
1521 				mmap_assert_locked(tlb->mm);
1522 				split_huge_pud(vma, pud, addr);
1523 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1524 				goto next;
1525 			/* fall through */
1526 		}
1527 		if (pud_none_or_clear_bad(pud))
1528 			continue;
1529 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1530 next:
1531 		cond_resched();
1532 	} while (pud++, addr = next, addr != end);
1533 
1534 	return addr;
1535 }
1536 
1537 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1538 				struct vm_area_struct *vma, pgd_t *pgd,
1539 				unsigned long addr, unsigned long end,
1540 				struct zap_details *details)
1541 {
1542 	p4d_t *p4d;
1543 	unsigned long next;
1544 
1545 	p4d = p4d_offset(pgd, addr);
1546 	do {
1547 		next = p4d_addr_end(addr, end);
1548 		if (p4d_none_or_clear_bad(p4d))
1549 			continue;
1550 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1551 	} while (p4d++, addr = next, addr != end);
1552 
1553 	return addr;
1554 }
1555 
1556 void unmap_page_range(struct mmu_gather *tlb,
1557 			     struct vm_area_struct *vma,
1558 			     unsigned long addr, unsigned long end,
1559 			     struct zap_details *details)
1560 {
1561 	pgd_t *pgd;
1562 	unsigned long next;
1563 
1564 	BUG_ON(addr >= end);
1565 	tlb_start_vma(tlb, vma);
1566 	pgd = pgd_offset(vma->vm_mm, addr);
1567 	do {
1568 		next = pgd_addr_end(addr, end);
1569 		if (pgd_none_or_clear_bad(pgd))
1570 			continue;
1571 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1572 	} while (pgd++, addr = next, addr != end);
1573 	tlb_end_vma(tlb, vma);
1574 }
1575 
1576 
1577 static void unmap_single_vma(struct mmu_gather *tlb,
1578 		struct vm_area_struct *vma, unsigned long start_addr,
1579 		unsigned long end_addr,
1580 		struct zap_details *details)
1581 {
1582 	unsigned long start = max(vma->vm_start, start_addr);
1583 	unsigned long end;
1584 
1585 	if (start >= vma->vm_end)
1586 		return;
1587 	end = min(vma->vm_end, end_addr);
1588 	if (end <= vma->vm_start)
1589 		return;
1590 
1591 	if (vma->vm_file)
1592 		uprobe_munmap(vma, start, end);
1593 
1594 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1595 		untrack_pfn(vma, 0, 0);
1596 
1597 	if (start != end) {
1598 		if (unlikely(is_vm_hugetlb_page(vma))) {
1599 			/*
1600 			 * It is undesirable to test vma->vm_file as it
1601 			 * should be non-null for valid hugetlb area.
1602 			 * However, vm_file will be NULL in the error
1603 			 * cleanup path of mmap_region. When
1604 			 * hugetlbfs ->mmap method fails,
1605 			 * mmap_region() nullifies vma->vm_file
1606 			 * before calling this function to clean up.
1607 			 * Since no pte has actually been setup, it is
1608 			 * safe to do nothing in this case.
1609 			 */
1610 			if (vma->vm_file) {
1611 				i_mmap_lock_write(vma->vm_file->f_mapping);
1612 				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1613 				i_mmap_unlock_write(vma->vm_file->f_mapping);
1614 			}
1615 		} else
1616 			unmap_page_range(tlb, vma, start, end, details);
1617 	}
1618 }
1619 
1620 /**
1621  * unmap_vmas - unmap a range of memory covered by a list of vma's
1622  * @tlb: address of the caller's struct mmu_gather
1623  * @vma: the starting vma
1624  * @start_addr: virtual address at which to start unmapping
1625  * @end_addr: virtual address at which to end unmapping
1626  *
1627  * Unmap all pages in the vma list.
1628  *
1629  * Only addresses between `start_addr' and `end_addr' will be unmapped.
1630  *
1631  * The VMA list must be sorted in ascending virtual address order.
1632  *
1633  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1634  * range after unmap_vmas() returns.  So the only responsibility here is to
1635  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1636  * drops the lock and schedules.
1637  */
1638 void unmap_vmas(struct mmu_gather *tlb,
1639 		struct vm_area_struct *vma, unsigned long start_addr,
1640 		unsigned long end_addr)
1641 {
1642 	struct mmu_notifier_range range;
1643 
1644 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1645 				start_addr, end_addr);
1646 	mmu_notifier_invalidate_range_start(&range);
1647 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1648 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1649 	mmu_notifier_invalidate_range_end(&range);
1650 }
1651 
1652 /**
1653  * zap_page_range - remove user pages in a given range
1654  * @vma: vm_area_struct holding the applicable pages
1655  * @start: starting address of pages to zap
1656  * @size: number of bytes to zap
1657  *
1658  * Caller must protect the VMA list
1659  */
1660 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1661 		unsigned long size)
1662 {
1663 	struct mmu_notifier_range range;
1664 	struct mmu_gather tlb;
1665 
1666 	lru_add_drain();
1667 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1668 				start, start + size);
1669 	tlb_gather_mmu(&tlb, vma->vm_mm);
1670 	update_hiwater_rss(vma->vm_mm);
1671 	mmu_notifier_invalidate_range_start(&range);
1672 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1673 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1674 	mmu_notifier_invalidate_range_end(&range);
1675 	tlb_finish_mmu(&tlb);
1676 }
1677 
1678 /**
1679  * zap_page_range_single - remove user pages in a given range
1680  * @vma: vm_area_struct holding the applicable pages
1681  * @address: starting address of pages to zap
1682  * @size: number of bytes to zap
1683  * @details: details of shared cache invalidation
1684  *
1685  * The range must fit into one VMA.
1686  */
1687 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1688 		unsigned long size, struct zap_details *details)
1689 {
1690 	struct mmu_notifier_range range;
1691 	struct mmu_gather tlb;
1692 
1693 	lru_add_drain();
1694 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1695 				address, address + size);
1696 	tlb_gather_mmu(&tlb, vma->vm_mm);
1697 	update_hiwater_rss(vma->vm_mm);
1698 	mmu_notifier_invalidate_range_start(&range);
1699 	unmap_single_vma(&tlb, vma, address, range.end, details);
1700 	mmu_notifier_invalidate_range_end(&range);
1701 	tlb_finish_mmu(&tlb);
1702 }
1703 
1704 /**
1705  * zap_vma_ptes - remove ptes mapping the vma
1706  * @vma: vm_area_struct holding ptes to be zapped
1707  * @address: starting address of pages to zap
1708  * @size: number of bytes to zap
1709  *
1710  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1711  *
1712  * The entire address range must be fully contained within the vma.
1713  *
1714  */
1715 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1716 		unsigned long size)
1717 {
1718 	if (!range_in_vma(vma, address, address + size) ||
1719 	    !(vma->vm_flags & VM_PFNMAP))
1720 		return;
1721 
1722 	zap_page_range_single(vma, address, size, NULL);
1723 }
1724 EXPORT_SYMBOL_GPL(zap_vma_ptes);
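/*
 * Hypothetical usage sketch (not taken from an in-tree caller): a driver
 * that established a VM_PFNMAP mapping with remap_pfn_range() in its
 * ->mmap() handler could drop the ptes again before reusing the backing
 * pfns:
 *
 *	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *
 * The range must lie entirely within the vma and the vma must be
 * VM_PFNMAP, otherwise the call silently does nothing.
 */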
1725 
1726 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1727 {
1728 	pgd_t *pgd;
1729 	p4d_t *p4d;
1730 	pud_t *pud;
1731 	pmd_t *pmd;
1732 
1733 	pgd = pgd_offset(mm, addr);
1734 	p4d = p4d_alloc(mm, pgd, addr);
1735 	if (!p4d)
1736 		return NULL;
1737 	pud = pud_alloc(mm, p4d, addr);
1738 	if (!pud)
1739 		return NULL;
1740 	pmd = pmd_alloc(mm, pud, addr);
1741 	if (!pmd)
1742 		return NULL;
1743 
1744 	VM_BUG_ON(pmd_trans_huge(*pmd));
1745 	return pmd;
1746 }
1747 
1748 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1749 			spinlock_t **ptl)
1750 {
1751 	pmd_t *pmd = walk_to_pmd(mm, addr);
1752 
1753 	if (!pmd)
1754 		return NULL;
1755 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1756 }
1757 
1758 static int validate_page_before_insert(struct page *page)
1759 {
1760 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1761 		return -EINVAL;
1762 	flush_dcache_page(page);
1763 	return 0;
1764 }
1765 
1766 static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1767 			unsigned long addr, struct page *page, pgprot_t prot)
1768 {
1769 	if (!pte_none(*pte))
1770 		return -EBUSY;
1771 	/* Ok, finally just insert the thing.. */
1772 	get_page(page);
1773 	inc_mm_counter_fast(mm, mm_counter_file(page));
1774 	page_add_file_rmap(page, false);
1775 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1776 	return 0;
1777 }
1778 
1779 /*
1780  * This is the old fallback for page remapping.
1781  *
1782  * For historical reasons, it only allows reserved pages. Only
1783  * old drivers should use this, and they needed to mark their
1784  * pages reserved for the old functions anyway.
1785  */
1786 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1787 			struct page *page, pgprot_t prot)
1788 {
1789 	struct mm_struct *mm = vma->vm_mm;
1790 	int retval;
1791 	pte_t *pte;
1792 	spinlock_t *ptl;
1793 
1794 	retval = validate_page_before_insert(page);
1795 	if (retval)
1796 		goto out;
1797 	retval = -ENOMEM;
1798 	pte = get_locked_pte(mm, addr, &ptl);
1799 	if (!pte)
1800 		goto out;
1801 	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1802 	pte_unmap_unlock(pte, ptl);
1803 out:
1804 	return retval;
1805 }
1806 
1807 #ifdef pte_index
1808 static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1809 			unsigned long addr, struct page *page, pgprot_t prot)
1810 {
1811 	int err;
1812 
1813 	if (!page_count(page))
1814 		return -EINVAL;
1815 	err = validate_page_before_insert(page);
1816 	if (err)
1817 		return err;
1818 	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1819 }
1820 
1821 /* insert_pages() amortizes the cost of spinlock operations
1822  * when inserting pages in a loop. Arch *must* define pte_index.
1823  */
1824 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1825 			struct page **pages, unsigned long *num, pgprot_t prot)
1826 {
1827 	pmd_t *pmd = NULL;
1828 	pte_t *start_pte, *pte;
1829 	spinlock_t *pte_lock;
1830 	struct mm_struct *const mm = vma->vm_mm;
1831 	unsigned long curr_page_idx = 0;
1832 	unsigned long remaining_pages_total = *num;
1833 	unsigned long pages_to_write_in_pmd;
1834 	int ret;
1835 more:
1836 	ret = -EFAULT;
1837 	pmd = walk_to_pmd(mm, addr);
1838 	if (!pmd)
1839 		goto out;
1840 
1841 	pages_to_write_in_pmd = min_t(unsigned long,
1842 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1843 
1844 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1845 	ret = -ENOMEM;
1846 	if (pte_alloc(mm, pmd))
1847 		goto out;
1848 
1849 	while (pages_to_write_in_pmd) {
1850 		int pte_idx = 0;
1851 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1852 
1853 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1854 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1855 			int err = insert_page_in_batch_locked(mm, pte,
1856 				addr, pages[curr_page_idx], prot);
1857 			if (unlikely(err)) {
1858 				pte_unmap_unlock(start_pte, pte_lock);
1859 				ret = err;
1860 				remaining_pages_total -= pte_idx;
1861 				goto out;
1862 			}
1863 			addr += PAGE_SIZE;
1864 			++curr_page_idx;
1865 		}
1866 		pte_unmap_unlock(start_pte, pte_lock);
1867 		pages_to_write_in_pmd -= batch_size;
1868 		remaining_pages_total -= batch_size;
1869 	}
1870 	if (remaining_pages_total)
1871 		goto more;
1872 	ret = 0;
1873 out:
1874 	*num = remaining_pages_total;
1875 	return ret;
1876 }
1877 #endif  /* ifdef pte_index */
1878 
1879 /**
1880  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1881  * @vma: user vma to map to
1882  * @addr: target start user address of these pages
1883  * @pages: source kernel pages
1884  * @num: in: number of pages to map. out: number of pages that were *not*
1885  * mapped. (0 means all pages were successfully mapped).
1886  *
1887  * Preferred over vm_insert_page() when inserting multiple pages.
1888  *
1889  * In case of error, we may have mapped a subset of the provided
1890  * pages. It is the caller's responsibility to account for this case.
1891  *
1892  * The same restrictions apply as in vm_insert_page().
1893  */
1894 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1895 			struct page **pages, unsigned long *num)
1896 {
1897 #ifdef pte_index
1898 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1899 
1900 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1901 		return -EFAULT;
1902 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1903 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1904 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1905 		vma->vm_flags |= VM_MIXEDMAP;
1906 	}
1907 	/* Defer page refcount checking till we're about to map that page. */
1908 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1909 #else
1910 	unsigned long idx = 0, pgcount = *num;
1911 	int err = -EINVAL;
1912 
1913 	for (; idx < pgcount; ++idx) {
1914 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1915 		if (err)
1916 			break;
1917 	}
1918 	*num = pgcount - idx;
1919 	return err;
1920 #endif  /* ifdef pte_index */
1921 }
1922 EXPORT_SYMBOL(vm_insert_pages);
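
/*
 * A hedged sketch of the batched interface from a hypothetical mmap
 * handler: my_buf, its pages[]/nr_pages fields and my_mmap() are
 * assumptions.  The point is that one call covers the whole range, taking
 * the PTE lock once per PMD instead of once per page.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buf *buf = file->private_data;
 *		unsigned long num = vma_pages(vma);
 *
 *		if (num > buf->nr_pages)
 *			return -EINVAL;
 *		// On return, num holds the count of pages *not* mapped.
 *		return vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
 *	}
 */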
1923 
1924 /**
1925  * vm_insert_page - insert single page into user vma
1926  * @vma: user vma to map to
1927  * @addr: target user address of this page
1928  * @page: source kernel page
1929  *
1930  * This allows drivers to insert individual pages they've allocated
1931  * into a user vma.
1932  *
1933  * The page has to be a nice clean _individual_ kernel allocation.
1934  * If you allocate a compound page, you need to have marked it as
1935  * such (__GFP_COMP), or manually just split the page up yourself
1936  * (see split_page()).
1937  *
1938  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1939  * took an arbitrary page protection parameter. This doesn't allow
1940  * that. Your vma protection will have to be set up correctly, which
1941  * means that if you want a shared writable mapping, you'd better
1942  * ask for a shared writable mapping!
1943  *
1944  * The page does not need to be reserved.
1945  *
1946  * Usually this function is called from f_op->mmap() handler
1947  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1948  * Caller must set VM_MIXEDMAP on vma if it wants to call this
1949  * function from other places, for example from page-fault handler.
1950  *
1951  * Return: %0 on success, negative error code otherwise.
1952  */
1953 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1954 			struct page *page)
1955 {
1956 	if (addr < vma->vm_start || addr >= vma->vm_end)
1957 		return -EFAULT;
1958 	if (!page_count(page))
1959 		return -EINVAL;
1960 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1961 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1962 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1963 		vma->vm_flags |= VM_MIXEDMAP;
1964 	}
1965 	return insert_page(vma, addr, page, vma->vm_page_prot);
1966 }
1967 EXPORT_SYMBOL(vm_insert_page);
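
/*
 * A minimal sketch of the classic single-page case described above: an
 * mmap handler handing one of its own kernel pages to userspace.  my_dev
 * and my_dev_mmap() are hypothetical.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		if (vma_pages(vma) != 1)
 *			return -EINVAL;
 *		// Under mmap_lock here, so VM_MIXEDMAP may be set on the vma.
 *		return vm_insert_page(vma, vma->vm_start, dev->page);
 *	}
 */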
1968 
1969 /*
1970  * __vm_map_pages - maps range of kernel pages into user vma
1971  * @vma: user vma to map to
1972  * @pages: pointer to array of source kernel pages
1973  * @num: number of pages in page array
1974  * @offset: user's requested vm_pgoff
1975  *
1976  * This allows drivers to map range of kernel pages into a user vma.
1977  *
1978  * Return: 0 on success and error code otherwise.
1979  */
1980 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1981 				unsigned long num, unsigned long offset)
1982 {
1983 	unsigned long count = vma_pages(vma);
1984 	unsigned long uaddr = vma->vm_start;
1985 	int ret, i;
1986 
1987 	/* Fail if the user requested offset is beyond the end of the object */
1988 	if (offset >= num)
1989 		return -ENXIO;
1990 
1991 	/* Fail if the user requested size exceeds available object size */
1992 	if (count > num - offset)
1993 		return -ENXIO;
1994 
1995 	for (i = 0; i < count; i++) {
1996 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1997 		if (ret < 0)
1998 			return ret;
1999 		uaddr += PAGE_SIZE;
2000 	}
2001 
2002 	return 0;
2003 }
2004 
2005 /**
2006  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2007  * @vma: user vma to map to
2008  * @pages: pointer to array of source kernel pages
2009  * @num: number of pages in page array
2010  *
2011  * Maps an object consisting of @num pages, catering for the user's
2012  * requested vm_pgoff
2013  *
2014  * If we fail to insert any page into the vma, the function will return
2015  * immediately leaving any previously inserted pages present.  Callers
2016  * from the mmap handler may immediately return the error as their caller
2017  * will destroy the vma, removing any successfully inserted pages. Other
2018  * callers should make their own arrangements for calling unmap_region().
2019  *
2020  * Context: Process context. Called by mmap handlers.
2021  * Return: 0 on success and error code otherwise.
2022  */
2023 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2024 				unsigned long num)
2025 {
2026 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2027 }
2028 EXPORT_SYMBOL(vm_map_pages);
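
/*
 * A minimal sketch of an mmap handler whose backing object spans several
 * pages and which lets vm_map_pages() validate vm_pgoff and the requested
 * length; my_obj and my_obj_mmap() are hypothetical.
 *
 *	static int my_obj_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		// Fails with -ENXIO if the request does not fit in obj->nr_pages.
 *		return vm_map_pages(vma, obj->pages, obj->nr_pages);
 *	}
 */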
2029 
2030 /**
2031  * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2032  * @vma: user vma to map to
2033  * @pages: pointer to array of source kernel pages
2034  * @num: number of pages in page array
2035  *
2036  * Similar to vm_map_pages(), except that it explicitly sets the offset
2037  * to 0. This function is intended for drivers that do not take vm_pgoff
2038  * into account.
2039  *
2040  * Context: Process context. Called by mmap handlers.
2041  * Return: 0 on success and error code otherwise.
2042  */
2043 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2044 				unsigned long num)
2045 {
2046 	return __vm_map_pages(vma, pages, num, 0);
2047 }
2048 EXPORT_SYMBOL(vm_map_pages_zero);
2049 
2050 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2051 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2052 {
2053 	struct mm_struct *mm = vma->vm_mm;
2054 	pte_t *pte, entry;
2055 	spinlock_t *ptl;
2056 
2057 	pte = get_locked_pte(mm, addr, &ptl);
2058 	if (!pte)
2059 		return VM_FAULT_OOM;
2060 	if (!pte_none(*pte)) {
2061 		if (mkwrite) {
2062 			/*
2063 			 * For read faults on private mappings the PFN passed
2064 			 * in may not match the PFN we have mapped if the
2065 			 * mapped PFN is a writeable COW page.  In the mkwrite
2066 			 * case we are creating a writable PTE for a shared
2067 			 * mapping and we expect the PFNs to match. If they
2068 			 * don't match, we are likely racing with block
2069 			 * allocation and mapping invalidation so just skip the
2070 			 * update.
2071 			 */
2072 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2073 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2074 				goto out_unlock;
2075 			}
2076 			entry = pte_mkyoung(*pte);
2077 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2078 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2079 				update_mmu_cache(vma, addr, pte);
2080 		}
2081 		goto out_unlock;
2082 	}
2083 
2084 	/* Ok, finally just insert the thing.. */
2085 	if (pfn_t_devmap(pfn))
2086 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2087 	else
2088 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2089 
2090 	if (mkwrite) {
2091 		entry = pte_mkyoung(entry);
2092 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2093 	}
2094 
2095 	set_pte_at(mm, addr, pte, entry);
2096 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2097 
2098 out_unlock:
2099 	pte_unmap_unlock(pte, ptl);
2100 	return VM_FAULT_NOPAGE;
2101 }
2102 
2103 /**
2104  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2105  * @vma: user vma to map to
2106  * @addr: target user address of this page
2107  * @pfn: source kernel pfn
2108  * @pgprot: pgprot flags for the inserted page
2109  *
2110  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2111  * to override pgprot on a per-page basis.
2112  *
2113  * This only makes sense for IO mappings, and it makes no sense for
2114  * COW mappings.  In general, using multiple vmas is preferable;
2115  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2116  * impractical.
2117  *
2118  * See vmf_insert_mixed_prot() for a discussion of the implication of using
2119  * a value of @pgprot different from that of @vma->vm_page_prot.
2120  *
2121  * Context: Process context.  May allocate using %GFP_KERNEL.
2122  * Return: vm_fault_t value.
2123  */
2124 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2125 			unsigned long pfn, pgprot_t pgprot)
2126 {
2127 	/*
2128 	 * Technically, architectures with pte_special can avoid all these
2129 	 * restrictions (same for remap_pfn_range).  However we would like
2130 	 * consistency in testing and feature parity among all, so we should
2131 	 * try to keep these invariants in place for everybody.
2132 	 */
2133 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2134 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2135 						(VM_PFNMAP|VM_MIXEDMAP));
2136 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2137 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2138 
2139 	if (addr < vma->vm_start || addr >= vma->vm_end)
2140 		return VM_FAULT_SIGBUS;
2141 
2142 	if (!pfn_modify_allowed(pfn, pgprot))
2143 		return VM_FAULT_SIGBUS;
2144 
2145 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2146 
2147 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2148 			false);
2149 }
2150 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2151 
2152 /**
2153  * vmf_insert_pfn - insert single pfn into user vma
2154  * @vma: user vma to map to
2155  * @addr: target user address of this page
2156  * @pfn: source kernel pfn
2157  *
2158  * Similar to vm_insert_page, this allows drivers to insert individual pages
2159  * they've allocated into a user vma. Same comments apply.
2160  *
2161  * This function should only be called from a vm_ops->fault handler, and
2162  * in that case the handler should return the result of this function.
2163  *
2164  * vma cannot be a COW mapping.
2165  *
2166  * As this is called only for pages that do not currently exist, we
2167  * do not need to flush old virtual caches or the TLB.
2168  *
2169  * Context: Process context.  May allocate using %GFP_KERNEL.
2170  * Return: vm_fault_t value.
2171  */
2172 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2173 			unsigned long pfn)
2174 {
2175 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2176 }
2177 EXPORT_SYMBOL(vmf_insert_pfn);
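
/*
 * A minimal sketch of the fault-handler usage described above, for a
 * hypothetical VM_PFNMAP driver: my_dev, my_pfn_fault() and the base_pfn
 * arithmetic are assumptions, not kernel API.  Where a per-page protection
 * is needed (e.g. write-combining), vmf_insert_pfn_prot() above can be
 * called instead with something like pgprot_writecombine(vma->vm_page_prot).
 *
 *	static vm_fault_t my_pfn_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *
 *		// Return the result directly, as the comment above asks.
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      dev->base_pfn + vmf->pgoff);
 *	}
 */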
2178 
2179 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2180 {
2181 	/* these checks mirror the abort conditions in vm_normal_page */
2182 	if (vma->vm_flags & VM_MIXEDMAP)
2183 		return true;
2184 	if (pfn_t_devmap(pfn))
2185 		return true;
2186 	if (pfn_t_special(pfn))
2187 		return true;
2188 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2189 		return true;
2190 	return false;
2191 }
2192 
2193 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2194 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2195 		bool mkwrite)
2196 {
2197 	int err;
2198 
2199 	BUG_ON(!vm_mixed_ok(vma, pfn));
2200 
2201 	if (addr < vma->vm_start || addr >= vma->vm_end)
2202 		return VM_FAULT_SIGBUS;
2203 
2204 	track_pfn_insert(vma, &pgprot, pfn);
2205 
2206 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2207 		return VM_FAULT_SIGBUS;
2208 
2209 	/*
2210 	 * If we don't have pte special, then we have to use the pfn_valid()
2211 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2212 	 * refcount the page if pfn_valid is true (hence insert_page rather
2213 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2214  * without pte special, it would then be refcounted there as a normal page.
2215 	 */
2216 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2217 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2218 		struct page *page;
2219 
2220 		/*
2221 		 * At this point we are committed to insert_page()
2222 		 * regardless of whether the caller specified flags that
2223 		 * result in pfn_t_has_page() == false.
2224 		 */
2225 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2226 		err = insert_page(vma, addr, page, pgprot);
2227 	} else {
2228 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2229 	}
2230 
2231 	if (err == -ENOMEM)
2232 		return VM_FAULT_OOM;
2233 	if (err < 0 && err != -EBUSY)
2234 		return VM_FAULT_SIGBUS;
2235 
2236 	return VM_FAULT_NOPAGE;
2237 }
2238 
2239 /**
2240  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2241  * @vma: user vma to map to
2242  * @addr: target user address of this page
2243  * @pfn: source kernel pfn
2244  * @pgprot: pgprot flags for the inserted page
2245  *
2246  * This is exactly like vmf_insert_mixed(), except that it allows drivers
2247  * to override pgprot on a per-page basis.
2248  *
2249  * Typically this function should be used by drivers to set caching and
2250  * encryption bits different from those of @vma->vm_page_prot, because
2251  * the caching- or encryption mode may not be known at mmap() time.
2252  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2253  * to set caching and encryption bits for those vmas (except for COW pages).
2254  * This is ensured by core vm only modifying these page table entries using
2255  * functions that don't touch caching- or encryption bits, using pte_modify()
2256  * if needed. (See for example mprotect()).
2257  * Also when new page-table entries are created, this is only done using the
2258  * fault() callback, and never using the value of vma->vm_page_prot,
2259  * except for page-table entries that point to anonymous pages as the result
2260  * of COW.
2261  *
2262  * Context: Process context.  May allocate using %GFP_KERNEL.
2263  * Return: vm_fault_t value.
2264  */
2265 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2266 				 pfn_t pfn, pgprot_t pgprot)
2267 {
2268 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2269 }
2270 EXPORT_SYMBOL(vmf_insert_mixed_prot);
2271 
2272 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2273 		pfn_t pfn)
2274 {
2275 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2276 }
2277 EXPORT_SYMBOL(vmf_insert_mixed);
2278 
2279 /*
2280  *  If the insertion of the PTE failed because someone else already added a
2281  *  different entry in the meantime, we treat that as success, assuming that
2282  *  the same entry was actually inserted.
2283  */
2284 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2285 		unsigned long addr, pfn_t pfn)
2286 {
2287 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2288 }
2289 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2290 
2291 /*
2292  * Maps a range of physical memory into the requested pages. The old
2293  * mappings are removed. Any references to nonexistent pages result
2294  * in null mappings (currently treated as "copy-on-access").
2295  */
2296 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2297 			unsigned long addr, unsigned long end,
2298 			unsigned long pfn, pgprot_t prot)
2299 {
2300 	pte_t *pte, *mapped_pte;
2301 	spinlock_t *ptl;
2302 	int err = 0;
2303 
2304 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2305 	if (!pte)
2306 		return -ENOMEM;
2307 	arch_enter_lazy_mmu_mode();
2308 	do {
2309 		BUG_ON(!pte_none(*pte));
2310 		if (!pfn_modify_allowed(pfn, prot)) {
2311 			err = -EACCES;
2312 			break;
2313 		}
2314 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2315 		pfn++;
2316 	} while (pte++, addr += PAGE_SIZE, addr != end);
2317 	arch_leave_lazy_mmu_mode();
2318 	pte_unmap_unlock(mapped_pte, ptl);
2319 	return err;
2320 }
2321 
2322 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2323 			unsigned long addr, unsigned long end,
2324 			unsigned long pfn, pgprot_t prot)
2325 {
2326 	pmd_t *pmd;
2327 	unsigned long next;
2328 	int err;
2329 
2330 	pfn -= addr >> PAGE_SHIFT;
2331 	pmd = pmd_alloc(mm, pud, addr);
2332 	if (!pmd)
2333 		return -ENOMEM;
2334 	VM_BUG_ON(pmd_trans_huge(*pmd));
2335 	do {
2336 		next = pmd_addr_end(addr, end);
2337 		err = remap_pte_range(mm, pmd, addr, next,
2338 				pfn + (addr >> PAGE_SHIFT), prot);
2339 		if (err)
2340 			return err;
2341 	} while (pmd++, addr = next, addr != end);
2342 	return 0;
2343 }
2344 
2345 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2346 			unsigned long addr, unsigned long end,
2347 			unsigned long pfn, pgprot_t prot)
2348 {
2349 	pud_t *pud;
2350 	unsigned long next;
2351 	int err;
2352 
2353 	pfn -= addr >> PAGE_SHIFT;
2354 	pud = pud_alloc(mm, p4d, addr);
2355 	if (!pud)
2356 		return -ENOMEM;
2357 	do {
2358 		next = pud_addr_end(addr, end);
2359 		err = remap_pmd_range(mm, pud, addr, next,
2360 				pfn + (addr >> PAGE_SHIFT), prot);
2361 		if (err)
2362 			return err;
2363 	} while (pud++, addr = next, addr != end);
2364 	return 0;
2365 }
2366 
2367 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2368 			unsigned long addr, unsigned long end,
2369 			unsigned long pfn, pgprot_t prot)
2370 {
2371 	p4d_t *p4d;
2372 	unsigned long next;
2373 	int err;
2374 
2375 	pfn -= addr >> PAGE_SHIFT;
2376 	p4d = p4d_alloc(mm, pgd, addr);
2377 	if (!p4d)
2378 		return -ENOMEM;
2379 	do {
2380 		next = p4d_addr_end(addr, end);
2381 		err = remap_pud_range(mm, p4d, addr, next,
2382 				pfn + (addr >> PAGE_SHIFT), prot);
2383 		if (err)
2384 			return err;
2385 	} while (p4d++, addr = next, addr != end);
2386 	return 0;
2387 }
2388 
2389 /*
2390  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2391  * must have pre-validated the caching bits of the pgprot_t.
2392  */
2393 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2394 		unsigned long pfn, unsigned long size, pgprot_t prot)
2395 {
2396 	pgd_t *pgd;
2397 	unsigned long next;
2398 	unsigned long end = addr + PAGE_ALIGN(size);
2399 	struct mm_struct *mm = vma->vm_mm;
2400 	int err;
2401 
2402 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2403 		return -EINVAL;
2404 
2405 	/*
2406 	 * Physically remapped pages are special. Tell the
2407 	 * rest of the world about it:
2408 	 *   VM_IO tells people not to look at these pages
2409 	 *	(accesses can have side effects).
2410 	 *   VM_PFNMAP tells the core MM that the base pages are just
2411 	 *	raw PFN mappings, and do not have a "struct page" associated
2412 	 *	with them.
2413 	 *   VM_DONTEXPAND
2414 	 *      Disable vma merging and expanding with mremap().
2415 	 *   VM_DONTDUMP
2416 	 *      Omit vma from core dump, even when VM_IO turned off.
2417 	 *
2418 	 * There's a horrible special case to handle copy-on-write
2419 	 * behaviour that some programs depend on. We mark the "original"
2420 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2421 	 * See vm_normal_page() for details.
2422 	 */
2423 	if (is_cow_mapping(vma->vm_flags)) {
2424 		if (addr != vma->vm_start || end != vma->vm_end)
2425 			return -EINVAL;
2426 		vma->vm_pgoff = pfn;
2427 	}
2428 
2429 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2430 
2431 	BUG_ON(addr >= end);
2432 	pfn -= addr >> PAGE_SHIFT;
2433 	pgd = pgd_offset(mm, addr);
2434 	flush_cache_range(vma, addr, end);
2435 	do {
2436 		next = pgd_addr_end(addr, end);
2437 		err = remap_p4d_range(mm, pgd, addr, next,
2438 				pfn + (addr >> PAGE_SHIFT), prot);
2439 		if (err)
2440 			return err;
2441 	} while (pgd++, addr = next, addr != end);
2442 
2443 	return 0;
2444 }
2445 
2446 /**
2447  * remap_pfn_range - remap kernel memory to userspace
2448  * @vma: user vma to map to
2449  * @addr: target page aligned user address to start at
2450  * @pfn: page frame number of kernel physical memory address
2451  * @size: size of mapping area
2452  * @prot: page protection flags for this mapping
2453  *
2454  * Note: this is only safe if the mmap_lock is held when called.
2455  *
2456  * Return: %0 on success, negative error code otherwise.
2457  */
2458 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2459 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2460 {
2461 	int err;
2462 
2463 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2464 	if (err)
2465 		return -EINVAL;
2466 
2467 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2468 	if (err)
2469 		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
2470 	return err;
2471 }
2472 EXPORT_SYMBOL(remap_pfn_range);
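
/*
 * A minimal sketch of the textbook use from a character device's mmap
 * handler, mapping device memory at the hypothetical physical address
 * dev->mem_start; everything except the kernel helpers is assumed.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *		unsigned long pfn = (dev->mem_start >> PAGE_SHIFT) + vma->vm_pgoff;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *				       vma->vm_page_prot);
 *	}
 */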
2473 
2474 /**
2475  * vm_iomap_memory - remap memory to userspace
2476  * @vma: user vma to map to
2477  * @start: start of the physical memory to be mapped
2478  * @len: size of area
2479  *
2480  * This is a simplified io_remap_pfn_range() for common driver use. The
2481  * driver just needs to give us the physical memory range to be mapped,
2482  * we'll figure out the rest from the vma information.
2483  *
2484  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2485  * write-combining or similar behaviour.
2486  *
2487  * Return: %0 on success, negative error code otherwise.
2488  */
2489 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2490 {
2491 	unsigned long vm_len, pfn, pages;
2492 
2493 	/* Check that the physical memory area passed in looks valid */
2494 	if (start + len < start)
2495 		return -EINVAL;
2496 	/*
2497 	 * You *really* shouldn't map things that aren't page-aligned,
2498 	 * but we've historically allowed it because IO memory might
2499 	 * just have smaller alignment.
2500 	 */
2501 	len += start & ~PAGE_MASK;
2502 	pfn = start >> PAGE_SHIFT;
2503 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2504 	if (pfn + pages < pfn)
2505 		return -EINVAL;
2506 
2507 	/* We start the mapping 'vm_pgoff' pages into the area */
2508 	if (vma->vm_pgoff > pages)
2509 		return -EINVAL;
2510 	pfn += vma->vm_pgoff;
2511 	pages -= vma->vm_pgoff;
2512 
2513 	/* Can we fit all of the mapping? */
2514 	vm_len = vma->vm_end - vma->vm_start;
2515 	if (vm_len >> PAGE_SHIFT > pages)
2516 		return -EINVAL;
2517 
2518 	/* Ok, let it rip */
2519 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2520 }
2521 EXPORT_SYMBOL(vm_iomap_memory);
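
/*
 * A minimal sketch of the simplified interface: compared with a
 * remap_pfn_range() mmap handler, the driver only supplies the physical
 * base and length, and vm_iomap_memory() derives the pfn, offset and size
 * checks from the vma.  my_dev and its mem_start/mem_len fields are
 * hypothetical.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return vm_iomap_memory(vma, dev->mem_start, dev->mem_len);
 *	}
 */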
2522 
2523 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2524 				     unsigned long addr, unsigned long end,
2525 				     pte_fn_t fn, void *data, bool create,
2526 				     pgtbl_mod_mask *mask)
2527 {
2528 	pte_t *pte, *mapped_pte;
2529 	int err = 0;
2530 	spinlock_t *ptl;
2531 
2532 	if (create) {
2533 		mapped_pte = pte = (mm == &init_mm) ?
2534 			pte_alloc_kernel_track(pmd, addr, mask) :
2535 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2536 		if (!pte)
2537 			return -ENOMEM;
2538 	} else {
2539 		mapped_pte = pte = (mm == &init_mm) ?
2540 			pte_offset_kernel(pmd, addr) :
2541 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2542 	}
2543 
2544 	BUG_ON(pmd_huge(*pmd));
2545 
2546 	arch_enter_lazy_mmu_mode();
2547 
2548 	if (fn) {
2549 		do {
2550 			if (create || !pte_none(*pte)) {
2551 				err = fn(pte++, addr, data);
2552 				if (err)
2553 					break;
2554 			}
2555 		} while (addr += PAGE_SIZE, addr != end);
2556 	}
2557 	*mask |= PGTBL_PTE_MODIFIED;
2558 
2559 	arch_leave_lazy_mmu_mode();
2560 
2561 	if (mm != &init_mm)
2562 		pte_unmap_unlock(mapped_pte, ptl);
2563 	return err;
2564 }
2565 
2566 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2567 				     unsigned long addr, unsigned long end,
2568 				     pte_fn_t fn, void *data, bool create,
2569 				     pgtbl_mod_mask *mask)
2570 {
2571 	pmd_t *pmd;
2572 	unsigned long next;
2573 	int err = 0;
2574 
2575 	BUG_ON(pud_huge(*pud));
2576 
2577 	if (create) {
2578 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2579 		if (!pmd)
2580 			return -ENOMEM;
2581 	} else {
2582 		pmd = pmd_offset(pud, addr);
2583 	}
2584 	do {
2585 		next = pmd_addr_end(addr, end);
2586 		if (pmd_none(*pmd) && !create)
2587 			continue;
2588 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2589 			return -EINVAL;
2590 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2591 			if (!create)
2592 				continue;
2593 			pmd_clear_bad(pmd);
2594 		}
2595 		err = apply_to_pte_range(mm, pmd, addr, next,
2596 					 fn, data, create, mask);
2597 		if (err)
2598 			break;
2599 	} while (pmd++, addr = next, addr != end);
2600 
2601 	return err;
2602 }
2603 
2604 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2605 				     unsigned long addr, unsigned long end,
2606 				     pte_fn_t fn, void *data, bool create,
2607 				     pgtbl_mod_mask *mask)
2608 {
2609 	pud_t *pud;
2610 	unsigned long next;
2611 	int err = 0;
2612 
2613 	if (create) {
2614 		pud = pud_alloc_track(mm, p4d, addr, mask);
2615 		if (!pud)
2616 			return -ENOMEM;
2617 	} else {
2618 		pud = pud_offset(p4d, addr);
2619 	}
2620 	do {
2621 		next = pud_addr_end(addr, end);
2622 		if (pud_none(*pud) && !create)
2623 			continue;
2624 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2625 			return -EINVAL;
2626 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2627 			if (!create)
2628 				continue;
2629 			pud_clear_bad(pud);
2630 		}
2631 		err = apply_to_pmd_range(mm, pud, addr, next,
2632 					 fn, data, create, mask);
2633 		if (err)
2634 			break;
2635 	} while (pud++, addr = next, addr != end);
2636 
2637 	return err;
2638 }
2639 
2640 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2641 				     unsigned long addr, unsigned long end,
2642 				     pte_fn_t fn, void *data, bool create,
2643 				     pgtbl_mod_mask *mask)
2644 {
2645 	p4d_t *p4d;
2646 	unsigned long next;
2647 	int err = 0;
2648 
2649 	if (create) {
2650 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2651 		if (!p4d)
2652 			return -ENOMEM;
2653 	} else {
2654 		p4d = p4d_offset(pgd, addr);
2655 	}
2656 	do {
2657 		next = p4d_addr_end(addr, end);
2658 		if (p4d_none(*p4d) && !create)
2659 			continue;
2660 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2661 			return -EINVAL;
2662 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2663 			if (!create)
2664 				continue;
2665 			p4d_clear_bad(p4d);
2666 		}
2667 		err = apply_to_pud_range(mm, p4d, addr, next,
2668 					 fn, data, create, mask);
2669 		if (err)
2670 			break;
2671 	} while (p4d++, addr = next, addr != end);
2672 
2673 	return err;
2674 }
2675 
2676 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2677 				 unsigned long size, pte_fn_t fn,
2678 				 void *data, bool create)
2679 {
2680 	pgd_t *pgd;
2681 	unsigned long start = addr, next;
2682 	unsigned long end = addr + size;
2683 	pgtbl_mod_mask mask = 0;
2684 	int err = 0;
2685 
2686 	if (WARN_ON(addr >= end))
2687 		return -EINVAL;
2688 
2689 	pgd = pgd_offset(mm, addr);
2690 	do {
2691 		next = pgd_addr_end(addr, end);
2692 		if (pgd_none(*pgd) && !create)
2693 			continue;
2694 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2695 			return -EINVAL;
2696 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2697 			if (!create)
2698 				continue;
2699 			pgd_clear_bad(pgd);
2700 		}
2701 		err = apply_to_p4d_range(mm, pgd, addr, next,
2702 					 fn, data, create, &mask);
2703 		if (err)
2704 			break;
2705 	} while (pgd++, addr = next, addr != end);
2706 
2707 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2708 		arch_sync_kernel_mappings(start, start + size);
2709 
2710 	return err;
2711 }
2712 
2713 /*
2714  * Scan a region of virtual memory, filling in page tables as necessary
2715  * and calling a provided function on each leaf page table.
2716  */
2717 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2718 			unsigned long size, pte_fn_t fn, void *data)
2719 {
2720 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2721 }
2722 EXPORT_SYMBOL_GPL(apply_to_page_range);
2723 
2724 /*
2725  * Scan a region of virtual memory, calling a provided function on
2726  * each leaf page table where it exists.
2727  *
2728  * Unlike apply_to_page_range, this does _not_ fill in page tables
2729  * where they are absent.
2730  */
2731 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2732 				 unsigned long size, pte_fn_t fn, void *data)
2733 {
2734 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2735 }
2736 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
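
/*
 * A hedged sketch of a pte_fn_t callback, here used with
 * apply_to_existing_page_range() to count populated PTEs in a range;
 * count_pte() and the example call are hypothetical, not existing kernel
 * code.  apply_to_page_range() would be used instead when missing
 * intermediate page tables should be allocated along the way.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	// unsigned long count = 0;
 *	// apply_to_existing_page_range(mm, start, size, count_pte, &count);
 */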
2737 
2738 /*
2739  * handle_pte_fault chooses page fault handler according to an entry which was
2740  * read non-atomically.  Before making any commitment, on those architectures
2741  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2742  * parts, do_swap_page must check under lock before unmapping the pte and
2743  * proceeding (but do_wp_page is only called after already making such a check;
2744  * and do_anonymous_page can safely check later on).
2745  */
2746 static inline int pte_unmap_same(struct vm_fault *vmf)
2747 {
2748 	int same = 1;
2749 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2750 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2751 		spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2752 		spin_lock(ptl);
2753 		same = pte_same(*vmf->pte, vmf->orig_pte);
2754 		spin_unlock(ptl);
2755 	}
2756 #endif
2757 	pte_unmap(vmf->pte);
2758 	vmf->pte = NULL;
2759 	return same;
2760 }
2761 
2762 static inline bool cow_user_page(struct page *dst, struct page *src,
2763 				 struct vm_fault *vmf)
2764 {
2765 	bool ret;
2766 	void *kaddr;
2767 	void __user *uaddr;
2768 	bool locked = false;
2769 	struct vm_area_struct *vma = vmf->vma;
2770 	struct mm_struct *mm = vma->vm_mm;
2771 	unsigned long addr = vmf->address;
2772 
2773 	if (likely(src)) {
2774 		copy_user_highpage(dst, src, addr, vma);
2775 		return true;
2776 	}
2777 
2778 	/*
2779 	 * If the source page was a PFN mapping, we don't have
2780 	 * a "struct page" for it. We do a best-effort copy by
2781 	 * just copying from the original user address. If that
2782 	 * fails, we just zero-fill it. Live with it.
2783 	 */
2784 	kaddr = kmap_atomic(dst);
2785 	uaddr = (void __user *)(addr & PAGE_MASK);
2786 
2787 	/*
2788 	 * On architectures with software "accessed" bits, we would
2789 	 * take a double page fault, so mark it accessed here.
2790 	 */
2791 	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2792 		pte_t entry;
2793 
2794 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2795 		locked = true;
2796 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2797 			/*
2798 			 * Another thread has already handled the fault;
2799 			 * just update the local TLB.
2800 			 */
2801 			update_mmu_tlb(vma, addr, vmf->pte);
2802 			ret = false;
2803 			goto pte_unlock;
2804 		}
2805 
2806 		entry = pte_mkyoung(vmf->orig_pte);
2807 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2808 			update_mmu_cache(vma, addr, vmf->pte);
2809 	}
2810 
2811 	/*
2812 	 * This really shouldn't fail, because the page is there
2813 	 * in the page tables. But it might just be unreadable,
2814 	 * in which case we just give up and fill the result with
2815 	 * zeroes.
2816 	 */
2817 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2818 		if (locked)
2819 			goto warn;
2820 
2821 		/* Re-validate under PTL if the page is still mapped */
2822 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2823 		locked = true;
2824 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2825 			/* The PTE changed under us, update local tlb */
2826 			update_mmu_tlb(vma, addr, vmf->pte);
2827 			ret = false;
2828 			goto pte_unlock;
2829 		}
2830 
2831 		/*
2832 		 * The same page may have been mapped back in since the
2833 		 * last copy attempt.  Try the copy again under the PTL.
2834 		 */
2835 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2836 			/*
2837 			 * Warn in case there is some obscure use-case
2838 			 * we have missed.
2839 			 */
2840 warn:
2841 			WARN_ON_ONCE(1);
2842 			clear_page(kaddr);
2843 		}
2844 	}
2845 
2846 	ret = true;
2847 
2848 pte_unlock:
2849 	if (locked)
2850 		pte_unmap_unlock(vmf->pte, vmf->ptl);
2851 	kunmap_atomic(kaddr);
2852 	flush_dcache_page(dst);
2853 
2854 	return ret;
2855 }
2856 
2857 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2858 {
2859 	struct file *vm_file = vma->vm_file;
2860 
2861 	if (vm_file)
2862 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2863 
2864 	/*
2865 	 * Special mappings (e.g. VDSO) do not have any file so fake
2866 	 * a default GFP_KERNEL for them.
2867 	 */
2868 	return GFP_KERNEL;
2869 }
2870 
2871 /*
2872  * Notify the address space that the page is about to become writable so that
2873  * it can prohibit this or wait for the page to get into an appropriate state.
2874  *
2875  * We do this without the lock held, so that it can sleep if it needs to.
2876  */
2877 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2878 {
2879 	vm_fault_t ret;
2880 	struct page *page = vmf->page;
2881 	unsigned int old_flags = vmf->flags;
2882 
2883 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2884 
2885 	if (vmf->vma->vm_file &&
2886 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2887 		return VM_FAULT_SIGBUS;
2888 
2889 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2890 	/* Restore original flags so that caller is not surprised */
2891 	vmf->flags = old_flags;
2892 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2893 		return ret;
2894 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2895 		lock_page(page);
2896 		if (!page->mapping) {
2897 			unlock_page(page);
2898 			return 0; /* retry */
2899 		}
2900 		ret |= VM_FAULT_LOCKED;
2901 	} else
2902 		VM_BUG_ON_PAGE(!PageLocked(page), page);
2903 	return ret;
2904 }
2905 
2906 /*
2907  * Handle dirtying of a page in a shared file mapping on a write fault.
2908  *
2909  * The function expects the page to be locked and unlocks it.
2910  */
2911 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2912 {
2913 	struct vm_area_struct *vma = vmf->vma;
2914 	struct address_space *mapping;
2915 	struct page *page = vmf->page;
2916 	bool dirtied;
2917 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2918 
2919 	dirtied = set_page_dirty(page);
2920 	VM_BUG_ON_PAGE(PageAnon(page), page);
2921 	/*
2922 	 * Take a local copy of the address_space - page.mapping may be zeroed
2923 	 * by truncate after unlock_page().   The address_space itself remains
2924 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2925 	 * release semantics to prevent the compiler from undoing this copying.
2926 	 */
2927 	mapping = page_rmapping(page);
2928 	unlock_page(page);
2929 
2930 	if (!page_mkwrite)
2931 		file_update_time(vma->vm_file);
2932 
2933 	/*
2934 	 * Throttle page dirtying rate down to writeback speed.
2935 	 *
2936 	 * mapping may be NULL here because some device drivers do not
2937 	 * set page.mapping but still dirty their pages
2938 	 *
2939 	 * Drop the mmap_lock before waiting on IO, if we can. The file
2940 	 * is pinning the mapping, as per above.
2941 	 */
2942 	if ((dirtied || page_mkwrite) && mapping) {
2943 		struct file *fpin;
2944 
2945 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2946 		balance_dirty_pages_ratelimited(mapping);
2947 		if (fpin) {
2948 			fput(fpin);
2949 			return VM_FAULT_RETRY;
2950 		}
2951 	}
2952 
2953 	return 0;
2954 }
2955 
2956 /*
2957  * Handle write page faults for pages that can be reused in the current vma
2958  *
2959  * This can happen either due to the mapping being with the VM_SHARED flag,
2960  * or due to us being the last reference standing to the page. In either
2961  * case, all we need to do here is to mark the page as writable and update
2962  * any related book-keeping.
2963  */
2964 static inline void wp_page_reuse(struct vm_fault *vmf)
2965 	__releases(vmf->ptl)
2966 {
2967 	struct vm_area_struct *vma = vmf->vma;
2968 	struct page *page = vmf->page;
2969 	pte_t entry;
2970 	/*
2971 	 * Clear the pages cpupid information as the existing
2972 	 * information potentially belongs to a now completely
2973 	 * unrelated process.
2974 	 */
2975 	if (page)
2976 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2977 
2978 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2979 	entry = pte_mkyoung(vmf->orig_pte);
2980 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2981 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2982 		update_mmu_cache(vma, vmf->address, vmf->pte);
2983 	pte_unmap_unlock(vmf->pte, vmf->ptl);
2984 	count_vm_event(PGREUSE);
2985 }
2986 
2987 /*
2988  * Handle the case of a page which we actually need to copy to a new page.
2989  *
2990  * Called with mmap_lock locked and the old page referenced, but
2991  * without the ptl held.
2992  *
2993  * High level logic flow:
2994  *
2995  * - Allocate a page, copy the content of the old page to the new one.
2996  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
2997  * - Take the PTL. If the pte changed, bail out and release the allocated page
2998  * - If the pte is still the way we remember it, update the page table and all
2999  *   relevant references. This includes dropping the reference the page-table
3000  *   held to the old page, as well as updating the rmap.
3001  * - In any case, unlock the PTL and drop the reference we took to the old page.
3002  */
3003 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3004 {
3005 	struct vm_area_struct *vma = vmf->vma;
3006 	struct mm_struct *mm = vma->vm_mm;
3007 	struct page *old_page = vmf->page;
3008 	struct page *new_page = NULL;
3009 	pte_t entry;
3010 	int page_copied = 0;
3011 	struct mmu_notifier_range range;
3012 
3013 	if (unlikely(anon_vma_prepare(vma)))
3014 		goto oom;
3015 
3016 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3017 		new_page = alloc_zeroed_user_highpage_movable(vma,
3018 							      vmf->address);
3019 		if (!new_page)
3020 			goto oom;
3021 	} else {
3022 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3023 				vmf->address);
3024 		if (!new_page)
3025 			goto oom;
3026 
3027 		if (!cow_user_page(new_page, old_page, vmf)) {
3028 			/*
3029 			 * COW failed; if the fault was resolved by another
3030 			 * thread, that's fine. If not, userspace will
3031 			 * re-fault at the same address and we will handle
3032 			 * the fault on the second attempt.
3033 			 */
3034 			put_page(new_page);
3035 			if (old_page)
3036 				put_page(old_page);
3037 			return 0;
3038 		}
3039 	}
3040 
3041 	if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
3042 		goto oom_free_new;
3043 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3044 
3045 	__SetPageUptodate(new_page);
3046 
3047 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3048 				vmf->address & PAGE_MASK,
3049 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3050 	mmu_notifier_invalidate_range_start(&range);
3051 
3052 	/*
3053 	 * Re-check the pte - we dropped the lock
3054 	 */
3055 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3056 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3057 		if (old_page) {
3058 			if (!PageAnon(old_page)) {
3059 				dec_mm_counter_fast(mm,
3060 						mm_counter_file(old_page));
3061 				inc_mm_counter_fast(mm, MM_ANONPAGES);
3062 			}
3063 		} else {
3064 			inc_mm_counter_fast(mm, MM_ANONPAGES);
3065 		}
3066 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3067 		entry = mk_pte(new_page, vma->vm_page_prot);
3068 		entry = pte_sw_mkyoung(entry);
3069 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3070 
3071 		/*
3072 		 * Clear the pte entry and flush it first, before updating the
3073 		 * pte with the new entry, to keep TLBs on different CPUs in
3074 		 * sync. This code used to set the new PTE then flush TLBs, but
3075 		 * that left a window where the new PTE could be loaded into
3076 		 * some TLBs while the old PTE remains in others.
3077 		 */
3078 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3079 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3080 		lru_cache_add_inactive_or_unevictable(new_page, vma);
3081 		/*
3082 		 * We call the notify macro here because, when using secondary
3083 		 * mmu page tables (such as kvm shadow page tables), we want the
3084 		 * new page to be mapped directly into the secondary page table.
3085 		 */
3086 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3087 		update_mmu_cache(vma, vmf->address, vmf->pte);
3088 		if (old_page) {
3089 			/*
3090 			 * Only after switching the pte to the new page may
3091 			 * we remove the mapcount here. Otherwise another
3092 			 * process may come and find the rmap count decremented
3093 			 * before the pte is switched to the new page, and
3094 			 * "reuse" the old page writing into it while our pte
3095 			 * here still points into it and can be read by other
3096 			 * threads.
3097 			 *
3098 			 * The critical issue is to order this
3099 			 * page_remove_rmap with the ptep_clear_flush above.
3100 			 * Those stores are ordered by (if nothing else,)
3101 			 * the barrier present in the atomic_add_negative
3102 			 * in page_remove_rmap.
3103 			 *
3104 			 * Then the TLB flush in ptep_clear_flush ensures that
3105 			 * no process can access the old page before the
3106 			 * decremented mapcount is visible. And the old page
3107 			 * cannot be reused until after the decremented
3108 			 * mapcount is visible. So transitively, TLBs to
3109 			 * old page will be flushed before it can be reused.
3110 			 */
3111 			page_remove_rmap(old_page, false);
3112 		}
3113 
3114 		/* Free the old page.. */
3115 		new_page = old_page;
3116 		page_copied = 1;
3117 	} else {
3118 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3119 	}
3120 
3121 	if (new_page)
3122 		put_page(new_page);
3123 
3124 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3125 	/*
3126 	 * No need to double call mmu_notifier->invalidate_range() callback as
3127 	 * the above ptep_clear_flush_notify() did already call it.
3128 	 */
3129 	mmu_notifier_invalidate_range_only_end(&range);
3130 	if (old_page) {
3131 		/*
3132 		 * Don't let another task, with possibly unlocked vma,
3133 		 * keep the mlocked page.
3134 		 */
3135 		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
3136 			lock_page(old_page);	/* LRU manipulation */
3137 			if (PageMlocked(old_page))
3138 				munlock_vma_page(old_page);
3139 			unlock_page(old_page);
3140 		}
3141 		if (page_copied)
3142 			free_swap_cache(old_page);
3143 		put_page(old_page);
3144 	}
3145 	return page_copied ? VM_FAULT_WRITE : 0;
3146 oom_free_new:
3147 	put_page(new_page);
3148 oom:
3149 	if (old_page)
3150 		put_page(old_page);
3151 	return VM_FAULT_OOM;
3152 }
3153 
3154 /**
3155  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3156  *			  writeable once the page is prepared
3157  *
3158  * @vmf: structure describing the fault
3159  *
3160  * This function handles all that is needed to finish a write page fault in a
3161  * shared mapping due to PTE being read-only once the mapped page is prepared.
3162  * It handles locking of PTE and modifying it.
3163  *
3164  * The function expects the page to be locked or other protection against
3165  * concurrent faults / writeback (such as DAX radix tree locks).
3166  *
3167  * Return: %0 on success, %VM_FAULT_NOPAGE if the PTE changed before
3168  * we acquired the PTE lock.
3169  */
3170 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3171 {
3172 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3173 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3174 				       &vmf->ptl);
3175 	/*
3176 	 * We might have raced with another page fault while we released the
3177 	 * pte_offset_map_lock.
3178 	 */
3179 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3180 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3181 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3182 		return VM_FAULT_NOPAGE;
3183 	}
3184 	wp_page_reuse(vmf);
3185 	return 0;
3186 }
3187 
3188 /*
3189  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3190  * mapping
3191  */
3192 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3193 {
3194 	struct vm_area_struct *vma = vmf->vma;
3195 
3196 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3197 		vm_fault_t ret;
3198 
3199 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3200 		vmf->flags |= FAULT_FLAG_MKWRITE;
3201 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3202 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3203 			return ret;
3204 		return finish_mkwrite_fault(vmf);
3205 	}
3206 	wp_page_reuse(vmf);
3207 	return VM_FAULT_WRITE;
3208 }
3209 
3210 static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3211 	__releases(vmf->ptl)
3212 {
3213 	struct vm_area_struct *vma = vmf->vma;
3214 	vm_fault_t ret = VM_FAULT_WRITE;
3215 
3216 	get_page(vmf->page);
3217 
3218 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3219 		vm_fault_t tmp;
3220 
3221 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3222 		tmp = do_page_mkwrite(vmf);
3223 		if (unlikely(!tmp || (tmp &
3224 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3225 			put_page(vmf->page);
3226 			return tmp;
3227 		}
3228 		tmp = finish_mkwrite_fault(vmf);
3229 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3230 			unlock_page(vmf->page);
3231 			put_page(vmf->page);
3232 			return tmp;
3233 		}
3234 	} else {
3235 		wp_page_reuse(vmf);
3236 		lock_page(vmf->page);
3237 	}
3238 	ret |= fault_dirty_shared_page(vmf);
3239 	put_page(vmf->page);
3240 
3241 	return ret;
3242 }
3243 
3244 /*
3245  * This routine handles present pages, when users try to write
3246  * to a shared page. It is done by copying the page to a new address
3247  * and decrementing the shared-page counter for the old page.
3248  *
3249  * Note that this routine assumes that the protection checks have been
3250  * done by the caller (the low-level page fault routine in most cases).
3251  * Thus we can safely just mark it writable once we've done any necessary
3252  * COW.
3253  *
3254  * We also mark the page dirty at this point even though the page will
3255  * change only once the write actually happens. This avoids a few races,
3256  * and potentially makes it more efficient.
3257  *
3258  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3259  * but allow concurrent faults), with pte both mapped and locked.
3260  * We return with mmap_lock still held, but pte unmapped and unlocked.
3261  */
3262 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3263 	__releases(vmf->ptl)
3264 {
3265 	struct vm_area_struct *vma = vmf->vma;
3266 
3267 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3268 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3269 		return handle_userfault(vmf, VM_UFFD_WP);
3270 	}
3271 
3272 	/*
3273 	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3274 	 * is flushed in this case before copying.
3275 	 */
3276 	if (unlikely(userfaultfd_wp(vmf->vma) &&
3277 		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3278 		flush_tlb_page(vmf->vma, vmf->address);
3279 
3280 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3281 	if (!vmf->page) {
3282 		/*
3283 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3284 		 * VM_PFNMAP VMA.
3285 		 *
3286 		 * We should not cow pages in a shared writeable mapping.
3287 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3288 		 */
3289 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3290 				     (VM_WRITE|VM_SHARED))
3291 			return wp_pfn_shared(vmf);
3292 
3293 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3294 		return wp_page_copy(vmf);
3295 	}
3296 
3297 	/*
3298 	 * Take out anonymous pages first, anonymous shared vmas are
3299 	 * not dirty accountable.
3300 	 */
3301 	if (PageAnon(vmf->page)) {
3302 		struct page *page = vmf->page;
3303 
3304 		/* PageKsm() doesn't necessarily raise the page refcount */
3305 		if (PageKsm(page) || page_count(page) != 1)
3306 			goto copy;
3307 		if (!trylock_page(page))
3308 			goto copy;
3309 		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3310 			unlock_page(page);
3311 			goto copy;
3312 		}
3313 		/*
3314 		 * Ok, we've got the only map reference, and the only
3315 		 * page count reference, and the page is locked,
3316 		 * it's dark out, and we're wearing sunglasses. Hit it.
3317 		 */
3318 		unlock_page(page);
3319 		wp_page_reuse(vmf);
3320 		return VM_FAULT_WRITE;
3321 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3322 					(VM_WRITE|VM_SHARED))) {
3323 		return wp_page_shared(vmf);
3324 	}
3325 copy:
3326 	/*
3327 	 * Ok, we need to copy. Oh, well..
3328 	 */
3329 	get_page(vmf->page);
3330 
3331 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3332 	return wp_page_copy(vmf);
3333 }
3334 
3335 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3336 		unsigned long start_addr, unsigned long end_addr,
3337 		struct zap_details *details)
3338 {
3339 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3340 }
3341 
3342 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3343 					    pgoff_t first_index,
3344 					    pgoff_t last_index,
3345 					    struct zap_details *details)
3346 {
3347 	struct vm_area_struct *vma;
3348 	pgoff_t vba, vea, zba, zea;
3349 
3350 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
3351 		vba = vma->vm_pgoff;
3352 		vea = vba + vma_pages(vma) - 1;
3353 		zba = max(first_index, vba);
3354 		zea = min(last_index, vea);
3355 
3356 		unmap_mapping_range_vma(vma,
3357 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3358 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3359 				details);
3360 	}
3361 }
3362 
3363 /**
3364  * unmap_mapping_folio() - Unmap single folio from processes.
3365  * @folio: The locked folio to be unmapped.
3366  *
3367  * Unmap this folio from any userspace process which still has it mmaped.
3368  * Typically, for efficiency, the range of nearby pages has already been
3369  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3370  * truncation or invalidation holds the lock on a folio, it may find that
3371  * the page has been remapped again: and then uses unmap_mapping_folio()
3372  * to unmap it finally.
3373  */
3374 void unmap_mapping_folio(struct folio *folio)
3375 {
3376 	struct address_space *mapping = folio->mapping;
3377 	struct zap_details details = { };
3378 	pgoff_t	first_index;
3379 	pgoff_t	last_index;
3380 
3381 	VM_BUG_ON(!folio_test_locked(folio));
3382 
3383 	first_index = folio->index;
3384 	last_index = folio->index + folio_nr_pages(folio) - 1;
3385 
3386 	details.even_cows = false;
3387 	details.single_folio = folio;
3388 
3389 	i_mmap_lock_write(mapping);
3390 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3391 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3392 					 last_index, &details);
3393 	i_mmap_unlock_write(mapping);
3394 }
3395 
3396 /**
3397  * unmap_mapping_pages() - Unmap pages from processes.
3398  * @mapping: The address space containing pages to be unmapped.
3399  * @start: Index of first page to be unmapped.
3400  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3401  * @even_cows: Whether to unmap even private COWed pages.
3402  *
3403  * Unmap the pages in this address space from any userspace process which
3404  * has them mmaped.  Generally, you want to remove COWed pages as well when
3405  * a file is being truncated, but not when invalidating pages from the page
3406  * cache.
3407  */
3408 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3409 		pgoff_t nr, bool even_cows)
3410 {
3411 	struct zap_details details = { };
3412 	pgoff_t	first_index = start;
3413 	pgoff_t	last_index = start + nr - 1;
3414 
3415 	details.even_cows = even_cows;
3416 	if (last_index < first_index)
3417 		last_index = ULONG_MAX;
3418 
3419 	i_mmap_lock_write(mapping);
3420 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3421 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3422 					 last_index, &details);
3423 	i_mmap_unlock_write(mapping);
3424 }
3425 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
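
/*
 * A short illustration of the page-index based interface; the index and
 * count below are hypothetical values.  Invalidation passes even_cows ==
 * false so that private COWed copies survive; truncation would pass true.
 *
 *	// Unmap 16 pages of the file starting at page index 100:
 *	unmap_mapping_pages(mapping, 100, 16, false);
 */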
3426 
3427 /**
3428  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3429  * address_space corresponding to the specified byte range in the underlying
3430  * file.
3431  *
3432  * @mapping: the address space containing mmaps to be unmapped.
3433  * @holebegin: byte in first page to unmap, relative to the start of
3434  * the underlying file.  This will be rounded down to a PAGE_SIZE
3435  * boundary.  Note that this is different from truncate_pagecache(), which
3436  * must keep the partial page.  In contrast, we must get rid of
3437  * partial pages.
3438  * @holelen: size of prospective hole in bytes.  This will be rounded
3439  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3440  * end of the file.
3441  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3442  * but 0 when invalidating pagecache, don't throw away private data.
3443  */
3444 void unmap_mapping_range(struct address_space *mapping,
3445 		loff_t const holebegin, loff_t const holelen, int even_cows)
3446 {
3447 	pgoff_t hba = holebegin >> PAGE_SHIFT;
3448 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3449 
3450 	/* Check for overflow. */
3451 	if (sizeof(holelen) > sizeof(hlen)) {
3452 		long long holeend =
3453 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3454 		if (holeend & ~(long long)ULONG_MAX)
3455 			hlen = ULONG_MAX - hba + 1;
3456 	}
3457 
3458 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3459 }
3460 EXPORT_SYMBOL(unmap_mapping_range);
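/*
 * Illustrative sketch of a truncate-style caller (the helper name is made
 * up): when shrinking a file to new_size, everything from new_size onwards
 * is unmapped, holelen == 0 means "to end of file", and even_cows == 1
 * discards private COWed copies of the truncated range as well.
 *
 *	static void example_truncate_mappings(struct inode *inode,
 *					      loff_t new_size)
 *	{
 *		unmap_mapping_range(inode->i_mapping, new_size, 0, 1);
 *	}
 */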
3461 
3462 /*
3463  * Restore a potential device exclusive pte to a working pte entry
3464  */
3465 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3466 {
3467 	struct page *page = vmf->page;
3468 	struct vm_area_struct *vma = vmf->vma;
3469 	struct mmu_notifier_range range;
3470 
3471 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
3472 		return VM_FAULT_RETRY;
3473 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3474 				vma->vm_mm, vmf->address & PAGE_MASK,
3475 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3476 	mmu_notifier_invalidate_range_start(&range);
3477 
3478 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3479 				&vmf->ptl);
3480 	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3481 		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
3482 
3483 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3484 	unlock_page(page);
3485 
3486 	mmu_notifier_invalidate_range_end(&range);
3487 	return 0;
3488 }
3489 
3490 /*
3491  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3492  * but allow concurrent faults), and pte mapped but not yet locked.
3493  * We return with pte unmapped and unlocked.
3494  *
3495  * We return with the mmap_lock locked or unlocked in the same cases
3496  * as does filemap_fault().
3497  */
3498 vm_fault_t do_swap_page(struct vm_fault *vmf)
3499 {
3500 	struct vm_area_struct *vma = vmf->vma;
3501 	struct page *page = NULL, *swapcache;
3502 	struct swap_info_struct *si = NULL;
3503 	swp_entry_t entry;
3504 	pte_t pte;
3505 	int locked;
3506 	int exclusive = 0;
3507 	vm_fault_t ret = 0;
3508 	void *shadow = NULL;
3509 
3510 	if (!pte_unmap_same(vmf))
3511 		goto out;
3512 
3513 	entry = pte_to_swp_entry(vmf->orig_pte);
3514 	if (unlikely(non_swap_entry(entry))) {
3515 		if (is_migration_entry(entry)) {
3516 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3517 					     vmf->address);
3518 		} else if (is_device_exclusive_entry(entry)) {
3519 			vmf->page = pfn_swap_entry_to_page(entry);
3520 			ret = remove_device_exclusive_entry(vmf);
3521 		} else if (is_device_private_entry(entry)) {
3522 			vmf->page = pfn_swap_entry_to_page(entry);
3523 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3524 		} else if (is_hwpoison_entry(entry)) {
3525 			ret = VM_FAULT_HWPOISON;
3526 		} else {
3527 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3528 			ret = VM_FAULT_SIGBUS;
3529 		}
3530 		goto out;
3531 	}
3532 
3533 	/* Prevent swapoff from happening to us. */
3534 	si = get_swap_device(entry);
3535 	if (unlikely(!si))
3536 		goto out;
3537 
3538 	page = lookup_swap_cache(entry, vma, vmf->address);
3539 	swapcache = page;
3540 
3541 	if (!page) {
3542 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3543 		    __swap_count(entry) == 1) {
3544 			/* skip swapcache */
3545 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3546 							vmf->address);
3547 			if (page) {
3548 				__SetPageLocked(page);
3549 				__SetPageSwapBacked(page);
3550 
3551 				if (mem_cgroup_swapin_charge_page(page,
3552 					vma->vm_mm, GFP_KERNEL, entry)) {
3553 					ret = VM_FAULT_OOM;
3554 					goto out_page;
3555 				}
3556 				mem_cgroup_swapin_uncharge_swap(entry);
3557 
3558 				shadow = get_shadow_from_swap_cache(entry);
3559 				if (shadow)
3560 					workingset_refault(page_folio(page),
3561 								shadow);
3562 
3563 				lru_cache_add(page);
3564 
3565 				/* To provide entry to swap_readpage() */
3566 				set_page_private(page, entry.val);
3567 				swap_readpage(page, true);
3568 				set_page_private(page, 0);
3569 			}
3570 		} else {
3571 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3572 						vmf);
3573 			swapcache = page;
3574 		}
3575 
3576 		if (!page) {
3577 			/*
3578 			 * Back out if somebody else faulted in this pte
3579 			 * while we released the pte lock.
3580 			 */
3581 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3582 					vmf->address, &vmf->ptl);
3583 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3584 				ret = VM_FAULT_OOM;
3585 			goto unlock;
3586 		}
3587 
3588 		/* Had to read the page from swap area: Major fault */
3589 		ret = VM_FAULT_MAJOR;
3590 		count_vm_event(PGMAJFAULT);
3591 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3592 	} else if (PageHWPoison(page)) {
3593 		/*
3594 		 * hwpoisoned dirty swapcache pages are kept for killing
3595 		 * owner processes (which may be unknown at hwpoison time)
3596 		 */
3597 		ret = VM_FAULT_HWPOISON;
3598 		goto out_release;
3599 	}
3600 
3601 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3602 
3603 	if (!locked) {
3604 		ret |= VM_FAULT_RETRY;
3605 		goto out_release;
3606 	}
3607 
3608 	/*
3609 	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3610 	 * release the swapcache from under us.  The page pin, and pte_same
3611 	 * test below, are not enough to exclude that.  Even if it is still
3612 	 * swapcache, we need to check that the page's swap has not changed.
3613 	 */
3614 	if (unlikely((!PageSwapCache(page) ||
3615 			page_private(page) != entry.val)) && swapcache)
3616 		goto out_page;
3617 
3618 	page = ksm_might_need_to_copy(page, vma, vmf->address);
3619 	if (unlikely(!page)) {
3620 		ret = VM_FAULT_OOM;
3621 		page = swapcache;
3622 		goto out_page;
3623 	}
3624 
3625 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3626 
3627 	/*
3628 	 * Back out if somebody else already faulted in this pte.
3629 	 */
3630 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3631 			&vmf->ptl);
3632 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3633 		goto out_nomap;
3634 
3635 	if (unlikely(!PageUptodate(page))) {
3636 		ret = VM_FAULT_SIGBUS;
3637 		goto out_nomap;
3638 	}
3639 
3640 	/*
3641 	 * The page isn't present yet, go ahead with the fault.
3642 	 *
3643 	 * Be careful about the sequence of operations here.
3644 	 * To get its accounting right, reuse_swap_page() must be called
3645 	 * while the page is counted on swap but not yet in mapcount i.e.
3646 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3647 	 * must be called after the swap_free(), or it will never succeed.
3648 	 */
3649 
3650 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3651 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3652 	pte = mk_pte(page, vma->vm_page_prot);
3653 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
3654 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3655 		vmf->flags &= ~FAULT_FLAG_WRITE;
3656 		ret |= VM_FAULT_WRITE;
3657 		exclusive = RMAP_EXCLUSIVE;
3658 	}
3659 	flush_icache_page(vma, page);
3660 	if (pte_swp_soft_dirty(vmf->orig_pte))
3661 		pte = pte_mksoft_dirty(pte);
3662 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3663 		pte = pte_mkuffd_wp(pte);
3664 		pte = pte_wrprotect(pte);
3665 	}
3666 	vmf->orig_pte = pte;
3667 
3668 	/* ksm created a completely new copy */
3669 	if (unlikely(page != swapcache && swapcache)) {
3670 		page_add_new_anon_rmap(page, vma, vmf->address, false);
3671 		lru_cache_add_inactive_or_unevictable(page, vma);
3672 	} else {
3673 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3674 	}
3675 
3676 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3677 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3678 
3679 	swap_free(entry);
3680 	if (mem_cgroup_swap_full(page) ||
3681 	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3682 		try_to_free_swap(page);
3683 	unlock_page(page);
3684 	if (page != swapcache && swapcache) {
3685 		/*
3686 		 * Hold the lock to prevent the swap entry from being reused
3687 		 * until we take the PT lock for the pte_same() check
3688 		 * (to avoid false positives from pte_same). For
3689 		 * further safety, release the lock after the swap_free
3690 		 * so that the swap count won't change under a
3691 		 * parallel locked swapcache.
3692 		 */
3693 		unlock_page(swapcache);
3694 		put_page(swapcache);
3695 	}
3696 
3697 	if (vmf->flags & FAULT_FLAG_WRITE) {
3698 		ret |= do_wp_page(vmf);
3699 		if (ret & VM_FAULT_ERROR)
3700 			ret &= VM_FAULT_ERROR;
3701 		goto out;
3702 	}
3703 
3704 	/* No need to invalidate - it was non-present before */
3705 	update_mmu_cache(vma, vmf->address, vmf->pte);
3706 unlock:
3707 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3708 out:
3709 	if (si)
3710 		put_swap_device(si);
3711 	return ret;
3712 out_nomap:
3713 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3714 out_page:
3715 	unlock_page(page);
3716 out_release:
3717 	put_page(page);
3718 	if (page != swapcache && swapcache) {
3719 		unlock_page(swapcache);
3720 		put_page(swapcache);
3721 	}
3722 	if (si)
3723 		put_swap_device(si);
3724 	return ret;
3725 }
3726 
3727 /*
3728  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3729  * but allow concurrent faults), and pte mapped but not yet locked.
3730  * We return with mmap_lock still held, but pte unmapped and unlocked.
3731  */
3732 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3733 {
3734 	struct vm_area_struct *vma = vmf->vma;
3735 	struct page *page;
3736 	vm_fault_t ret = 0;
3737 	pte_t entry;
3738 
3739 	/* File mapping without ->vm_ops ? */
3740 	if (vma->vm_flags & VM_SHARED)
3741 		return VM_FAULT_SIGBUS;
3742 
3743 	/*
3744 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3745 	 * pte_offset_map() on pmds where a huge pmd might be created
3746 	 * from a different thread.
3747 	 *
3748 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3749 	 * parallel threads are excluded by other means.
3750 	 *
3751 	 * Here we only have mmap_read_lock(mm).
3752 	 */
3753 	if (pte_alloc(vma->vm_mm, vmf->pmd))
3754 		return VM_FAULT_OOM;
3755 
3756 	/* See comment in handle_pte_fault() */
3757 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
3758 		return 0;
3759 
3760 	/* Use the zero-page for reads */
3761 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3762 			!mm_forbids_zeropage(vma->vm_mm)) {
3763 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3764 						vma->vm_page_prot));
3765 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3766 				vmf->address, &vmf->ptl);
3767 		if (!pte_none(*vmf->pte)) {
3768 			update_mmu_tlb(vma, vmf->address, vmf->pte);
3769 			goto unlock;
3770 		}
3771 		ret = check_stable_address_space(vma->vm_mm);
3772 		if (ret)
3773 			goto unlock;
3774 		/* Deliver the page fault to userland, check inside PT lock */
3775 		if (userfaultfd_missing(vma)) {
3776 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3777 			return handle_userfault(vmf, VM_UFFD_MISSING);
3778 		}
3779 		goto setpte;
3780 	}
3781 
3782 	/* Allocate our own private page. */
3783 	if (unlikely(anon_vma_prepare(vma)))
3784 		goto oom;
3785 	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3786 	if (!page)
3787 		goto oom;
3788 
3789 	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
3790 		goto oom_free_page;
3791 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3792 
3793 	/*
3794 	 * The memory barrier inside __SetPageUptodate makes sure that
3795 	 * preceding stores to the page contents become visible before
3796 	 * the set_pte_at() write.
3797 	 */
3798 	__SetPageUptodate(page);
3799 
3800 	entry = mk_pte(page, vma->vm_page_prot);
3801 	entry = pte_sw_mkyoung(entry);
3802 	if (vma->vm_flags & VM_WRITE)
3803 		entry = pte_mkwrite(pte_mkdirty(entry));
3804 
3805 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3806 			&vmf->ptl);
3807 	if (!pte_none(*vmf->pte)) {
3808 		update_mmu_cache(vma, vmf->address, vmf->pte);
3809 		goto release;
3810 	}
3811 
3812 	ret = check_stable_address_space(vma->vm_mm);
3813 	if (ret)
3814 		goto release;
3815 
3816 	/* Deliver the page fault to userland, check inside PT lock */
3817 	if (userfaultfd_missing(vma)) {
3818 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3819 		put_page(page);
3820 		return handle_userfault(vmf, VM_UFFD_MISSING);
3821 	}
3822 
3823 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3824 	page_add_new_anon_rmap(page, vma, vmf->address, false);
3825 	lru_cache_add_inactive_or_unevictable(page, vma);
3826 setpte:
3827 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3828 
3829 	/* No need to invalidate - it was non-present before */
3830 	update_mmu_cache(vma, vmf->address, vmf->pte);
3831 unlock:
3832 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3833 	return ret;
3834 release:
3835 	put_page(page);
3836 	goto unlock;
3837 oom_free_page:
3838 	put_page(page);
3839 oom:
3840 	return VM_FAULT_OOM;
3841 }
3842 
3843 /*
3844  * The mmap_lock must have been held on entry, and may have been
3845  * released depending on flags and vma->vm_ops->fault() return value.
3846  * See filemap_fault() and __lock_page_retry().
3847  */
3848 static vm_fault_t __do_fault(struct vm_fault *vmf)
3849 {
3850 	struct vm_area_struct *vma = vmf->vma;
3851 	vm_fault_t ret;
3852 
3853 	/*
3854 	 * Preallocate pte before we take page_lock because this might lead to
3855 	 * deadlocks for memcg reclaim which waits for pages under writeback:
3856 	 *				lock_page(A)
3857 	 *				SetPageWriteback(A)
3858 	 *				unlock_page(A)
3859 	 * lock_page(B)
3860 	 *				lock_page(B)
3861 	 * pte_alloc_one
3862 	 *   shrink_page_list
3863 	 *     wait_on_page_writeback(A)
3864 	 *				SetPageWriteback(B)
3865 	 *				unlock_page(B)
3866 	 *				# flush A, B to clear the writeback
3867 	 */
3868 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3869 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3870 		if (!vmf->prealloc_pte)
3871 			return VM_FAULT_OOM;
3872 	}
3873 
3874 	ret = vma->vm_ops->fault(vmf);
3875 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3876 			    VM_FAULT_DONE_COW)))
3877 		return ret;
3878 
3879 	if (unlikely(PageHWPoison(vmf->page))) {
3880 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
3881 		if (ret & VM_FAULT_LOCKED) {
3882 			/* Retry if a clean page was removed from the cache. */
3883 			if (invalidate_inode_page(vmf->page))
3884 				poisonret = 0;
3885 			unlock_page(vmf->page);
3886 		}
3887 		put_page(vmf->page);
3888 		vmf->page = NULL;
3889 		return poisonret;
3890 	}
3891 
3892 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
3893 		lock_page(vmf->page);
3894 	else
3895 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3896 
3897 	return ret;
3898 }
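/*
 * The ->fault() contract that __do_fault() relies on: a successful handler
 * leaves a referenced page in vmf->page and reports VM_FAULT_LOCKED if it
 * already holds the page lock (otherwise __do_fault() locks it).  A minimal
 * hypothetical handler, as a sketch only (the "mydrv" names are invented,
 * not part of this file):
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = mydrv_lookup_page(vmf->vma, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		get_page(page);
 *		lock_page(page);
 *		vmf->page = page;
 *		return VM_FAULT_LOCKED;
 *	}
 */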
3899 
3900 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3901 static void deposit_prealloc_pte(struct vm_fault *vmf)
3902 {
3903 	struct vm_area_struct *vma = vmf->vma;
3904 
3905 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3906 	/*
3907 	 * We are going to consume the prealloc table,
3908 	 * count that as nr_ptes.
3909 	 */
3910 	mm_inc_nr_ptes(vma->vm_mm);
3911 	vmf->prealloc_pte = NULL;
3912 }
3913 
3914 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3915 {
3916 	struct vm_area_struct *vma = vmf->vma;
3917 	bool write = vmf->flags & FAULT_FLAG_WRITE;
3918 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3919 	pmd_t entry;
3920 	int i;
3921 	vm_fault_t ret = VM_FAULT_FALLBACK;
3922 
3923 	if (!transhuge_vma_suitable(vma, haddr))
3924 		return ret;
3925 
3926 	page = compound_head(page);
3927 	if (compound_order(page) != HPAGE_PMD_ORDER)
3928 		return ret;
3929 
3930 	 * Just back off if any subpage of a THP is corrupted, otherwise
3931 	 * the corrupted page may be mapped by the PMD silently and escape the
3932 	 * check.  This kind of THP can only be PTE-mapped.  Access to
3933 	 * check.  This kind of THP just can be PTE mapped.  Access to
3934 	 * the corrupted subpage should trigger SIGBUS as expected.
3935 	 */
3936 	if (unlikely(PageHasHWPoisoned(page)))
3937 		return ret;
3938 
3939 	/*
3940 	 * Archs like ppc64 need additional space to store information
3941 	 * related to pte entry. Use the preallocated table for that.
3942 	 */
3943 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3944 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3945 		if (!vmf->prealloc_pte)
3946 			return VM_FAULT_OOM;
3947 	}
3948 
3949 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3950 	if (unlikely(!pmd_none(*vmf->pmd)))
3951 		goto out;
3952 
3953 	for (i = 0; i < HPAGE_PMD_NR; i++)
3954 		flush_icache_page(vma, page + i);
3955 
3956 	entry = mk_huge_pmd(page, vma->vm_page_prot);
3957 	if (write)
3958 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3959 
3960 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3961 	page_add_file_rmap(page, true);
3962 	/*
3963 	 * deposit and withdraw with pmd lock held
3964 	 */
3965 	if (arch_needs_pgtable_deposit())
3966 		deposit_prealloc_pte(vmf);
3967 
3968 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3969 
3970 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3971 
3972 	/* fault is handled */
3973 	ret = 0;
3974 	count_vm_event(THP_FILE_MAPPED);
3975 out:
3976 	spin_unlock(vmf->ptl);
3977 	return ret;
3978 }
3979 #else
3980 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3981 {
3982 	return VM_FAULT_FALLBACK;
3983 }
3984 #endif
3985 
3986 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
3987 {
3988 	struct vm_area_struct *vma = vmf->vma;
3989 	bool write = vmf->flags & FAULT_FLAG_WRITE;
3990 	bool prefault = vmf->address != addr;
3991 	pte_t entry;
3992 
3993 	flush_icache_page(vma, page);
3994 	entry = mk_pte(page, vma->vm_page_prot);
3995 
3996 	if (prefault && arch_wants_old_prefaulted_pte())
3997 		entry = pte_mkold(entry);
3998 	else
3999 		entry = pte_sw_mkyoung(entry);
4000 
4001 	if (write)
4002 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4003 	/* copy-on-write page */
4004 	if (write && !(vma->vm_flags & VM_SHARED)) {
4005 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4006 		page_add_new_anon_rmap(page, vma, addr, false);
4007 		lru_cache_add_inactive_or_unevictable(page, vma);
4008 	} else {
4009 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
4010 		page_add_file_rmap(page, false);
4011 	}
4012 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
4013 }
4014 
4015 /**
4016  * finish_fault - finish page fault once we have prepared the page to fault
4017  *
4018  * @vmf: structure describing the fault
4019  *
4020  * This function handles all that is needed to finish a page fault once the
4021  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4022  * given page, adds reverse page mapping, handles memcg charges and LRU
4023  * addition.
4024  *
4025  * The function expects the page to be locked and on success it consumes a
4026  * reference of a page being mapped (for the PTE which maps it).
4027  *
4028  * Return: %0 on success, %VM_FAULT_ code in case of error.
4029  */
4030 vm_fault_t finish_fault(struct vm_fault *vmf)
4031 {
4032 	struct vm_area_struct *vma = vmf->vma;
4033 	struct page *page;
4034 	vm_fault_t ret;
4035 
4036 	/* Did we COW the page? */
4037 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4038 		page = vmf->cow_page;
4039 	else
4040 		page = vmf->page;
4041 
4042 	/*
4043 	 * check even for read faults because we might have lost our CoWed
4044 	 * page
4045 	 */
4046 	if (!(vma->vm_flags & VM_SHARED)) {
4047 		ret = check_stable_address_space(vma->vm_mm);
4048 		if (ret)
4049 			return ret;
4050 	}
4051 
4052 	if (pmd_none(*vmf->pmd)) {
4053 		if (PageTransCompound(page)) {
4054 			ret = do_set_pmd(vmf, page);
4055 			if (ret != VM_FAULT_FALLBACK)
4056 				return ret;
4057 		}
4058 
4059 		if (vmf->prealloc_pte)
4060 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4061 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4062 			return VM_FAULT_OOM;
4063 	}
4064 
4065 	/* See comment in handle_pte_fault() */
4066 	if (pmd_devmap_trans_unstable(vmf->pmd))
4067 		return 0;
4068 
4069 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4070 				      vmf->address, &vmf->ptl);
4071 	ret = 0;
4072 	/* Re-check under ptl */
4073 	if (likely(pte_none(*vmf->pte)))
4074 		do_set_pte(vmf, page, vmf->address);
4075 	else
4076 		ret = VM_FAULT_NOPAGE;
4077 
4078 	update_mmu_tlb(vma, vmf->address, vmf->pte);
4079 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4080 	return ret;
4081 }
4082 
4083 static unsigned long fault_around_bytes __read_mostly =
4084 	rounddown_pow_of_two(65536);
4085 
4086 #ifdef CONFIG_DEBUG_FS
4087 static int fault_around_bytes_get(void *data, u64 *val)
4088 {
4089 	*val = fault_around_bytes;
4090 	return 0;
4091 }
4092 
4093 /*
4094  * fault_around_bytes must be rounded down to a power-of-two number of
4095  * pages, as that is what do_fault_around() expects to see.
4096  */
4097 static int fault_around_bytes_set(void *data, u64 val)
4098 {
4099 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4100 		return -EINVAL;
4101 	if (val > PAGE_SIZE)
4102 		fault_around_bytes = rounddown_pow_of_two(val);
4103 	else
4104 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4105 	return 0;
4106 }
4107 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4108 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4109 
4110 static int __init fault_around_debugfs(void)
4111 {
4112 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4113 				   &fault_around_bytes_fops);
4114 	return 0;
4115 }
4116 late_initcall(fault_around_debugfs);
4117 #endif
4118 
4119 /*
4120  * do_fault_around() tries to map a few pages around the fault address. The hope
4121  * is that the pages will be needed soon and this will lower the number of
4122  * faults to handle.
4123  *
4124  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4125  * not ready to be mapped: not up-to-date, locked, etc.
4126  *
4127  * This function is called with the page table lock taken. In the split ptlock
4128  * case the page table lock protects only those entries which belong to
4129  * the page table corresponding to the fault address.
4130  *
4131  * This function doesn't cross the VMA boundaries, in order to call map_pages()
4132  * only once.
4133  *
4134  * fault_around_bytes defines how many bytes we'll try to map.
4135  * do_fault_around() expects it to be a power-of-two number of bytes, at most
4136  * PTRS_PER_PTE * PAGE_SIZE.
4137  *
4138  * The virtual address of the area that we map is naturally aligned to
4139  * fault_around_bytes rounded down to the machine page size
4140  * (and therefore to page order).  This way it's easier to guarantee
4141  * that we don't cross page table boundaries.
4142  */
4143 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4144 {
4145 	unsigned long address = vmf->address, nr_pages, mask;
4146 	pgoff_t start_pgoff = vmf->pgoff;
4147 	pgoff_t end_pgoff;
4148 	int off;
4149 
4150 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4151 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4152 
4153 	address = max(address & mask, vmf->vma->vm_start);
4154 	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4155 	start_pgoff -= off;
4156 
4157 	/*
4158 	 *  end_pgoff is either the end of the page table, the end of
4159  *  the vma or nr_pages from start_pgoff, depending on which is nearest.
4160 	 */
4161 	end_pgoff = start_pgoff -
4162 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4163 		PTRS_PER_PTE - 1;
4164 	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4165 			start_pgoff + nr_pages - 1);
4166 
4167 	if (pmd_none(*vmf->pmd)) {
4168 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4169 		if (!vmf->prealloc_pte)
4170 			return VM_FAULT_OOM;
4171 	}
4172 
4173 	return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4174 }
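/*
 * Worked example for the window computation above (assuming 4K pages,
 * PTRS_PER_PTE == 512 and the default fault_around_bytes of 65536, i.e.
 * nr_pages == 16; the fault address is made up):
 *
 *	vmf->address = 0x7f0000012345, so address is first aligned down to
 *	0x7f0000010000 (64K).  off = ((0x12345 - 0x10000) >> 12) & 511 = 2,
 *	so start_pgoff moves back by 2 pages.  The page-table clamp gives
 *	end_pgoff = start_pgoff - 16 + 511 = start_pgoff + 495, but the
 *	nr_pages clamp wins: end_pgoff = start_pgoff + 15.  map_pages() is
 *	therefore asked to map a 16-page window, from two pages before the
 *	faulting page to thirteen pages after it (further clipped to the vma).
 */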
4175 
4176 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4177 {
4178 	struct vm_area_struct *vma = vmf->vma;
4179 	vm_fault_t ret = 0;
4180 
4181 	/*
4182 	 * Let's call ->map_pages() first and use ->fault() as fallback
4183 	 * if the page at the offset is not ready to be mapped (cold cache or
4184 	 * something).
4185 	 */
4186 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4187 		if (likely(!userfaultfd_minor(vmf->vma))) {
4188 			ret = do_fault_around(vmf);
4189 			if (ret)
4190 				return ret;
4191 		}
4192 	}
4193 
4194 	ret = __do_fault(vmf);
4195 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4196 		return ret;
4197 
4198 	ret |= finish_fault(vmf);
4199 	unlock_page(vmf->page);
4200 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4201 		put_page(vmf->page);
4202 	return ret;
4203 }
4204 
4205 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4206 {
4207 	struct vm_area_struct *vma = vmf->vma;
4208 	vm_fault_t ret;
4209 
4210 	if (unlikely(anon_vma_prepare(vma)))
4211 		return VM_FAULT_OOM;
4212 
4213 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4214 	if (!vmf->cow_page)
4215 		return VM_FAULT_OOM;
4216 
4217 	if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4218 				GFP_KERNEL)) {
4219 		put_page(vmf->cow_page);
4220 		return VM_FAULT_OOM;
4221 	}
4222 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4223 
4224 	ret = __do_fault(vmf);
4225 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4226 		goto uncharge_out;
4227 	if (ret & VM_FAULT_DONE_COW)
4228 		return ret;
4229 
4230 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4231 	__SetPageUptodate(vmf->cow_page);
4232 
4233 	ret |= finish_fault(vmf);
4234 	unlock_page(vmf->page);
4235 	put_page(vmf->page);
4236 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4237 		goto uncharge_out;
4238 	return ret;
4239 uncharge_out:
4240 	put_page(vmf->cow_page);
4241 	return ret;
4242 }
4243 
4244 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4245 {
4246 	struct vm_area_struct *vma = vmf->vma;
4247 	vm_fault_t ret, tmp;
4248 
4249 	ret = __do_fault(vmf);
4250 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4251 		return ret;
4252 
4253 	/*
4254 	 * Check if the backing address space wants to know that the page is
4255 	 * about to become writable
4256 	 */
4257 	if (vma->vm_ops->page_mkwrite) {
4258 		unlock_page(vmf->page);
4259 		tmp = do_page_mkwrite(vmf);
4260 		if (unlikely(!tmp ||
4261 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4262 			put_page(vmf->page);
4263 			return tmp;
4264 		}
4265 	}
4266 
4267 	ret |= finish_fault(vmf);
4268 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4269 					VM_FAULT_RETRY))) {
4270 		unlock_page(vmf->page);
4271 		put_page(vmf->page);
4272 		return ret;
4273 	}
4274 
4275 	ret |= fault_dirty_shared_page(vmf);
4276 	return ret;
4277 }
4278 
4279 /*
4280  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4281  * but allow concurrent faults).
4282  * The mmap_lock may have been released depending on flags and our
4283  * return value.  See filemap_fault() and __folio_lock_or_retry().
4284  * If mmap_lock is released, vma may become invalid (for example
4285  * by other thread calling munmap()).
4286  */
4287 static vm_fault_t do_fault(struct vm_fault *vmf)
4288 {
4289 	struct vm_area_struct *vma = vmf->vma;
4290 	struct mm_struct *vm_mm = vma->vm_mm;
4291 	vm_fault_t ret;
4292 
4293 	/*
4294 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4295 	 */
4296 	if (!vma->vm_ops->fault) {
4297 		/*
4298 		 * If we find a migration pmd entry or a none pmd entry, which
4299 		 * should never happen, return SIGBUS
4300 		 */
4301 		if (unlikely(!pmd_present(*vmf->pmd)))
4302 			ret = VM_FAULT_SIGBUS;
4303 		else {
4304 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4305 						       vmf->pmd,
4306 						       vmf->address,
4307 						       &vmf->ptl);
4308 			/*
4309 			 * Make sure this is not a temporary clearing of pte
4310 			 * by holding ptl and checking again. A R/M/W update
4311 			 * of the pte involves taking the ptl, clearing the pte
4312 			 * so that we don't have concurrent modification by
4313 			 * hardware, followed by an update.
4314 			 */
4315 			if (unlikely(pte_none(*vmf->pte)))
4316 				ret = VM_FAULT_SIGBUS;
4317 			else
4318 				ret = VM_FAULT_NOPAGE;
4319 
4320 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4321 		}
4322 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4323 		ret = do_read_fault(vmf);
4324 	else if (!(vma->vm_flags & VM_SHARED))
4325 		ret = do_cow_fault(vmf);
4326 	else
4327 		ret = do_shared_fault(vmf);
4328 
4329 	/* preallocated pagetable is unused: free it */
4330 	if (vmf->prealloc_pte) {
4331 		pte_free(vm_mm, vmf->prealloc_pte);
4332 		vmf->prealloc_pte = NULL;
4333 	}
4334 	return ret;
4335 }
4336 
4337 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4338 		      unsigned long addr, int page_nid, int *flags)
4339 {
4340 	get_page(page);
4341 
4342 	count_vm_numa_event(NUMA_HINT_FAULTS);
4343 	if (page_nid == numa_node_id()) {
4344 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4345 		*flags |= TNF_FAULT_LOCAL;
4346 	}
4347 
4348 	return mpol_misplaced(page, vma, addr);
4349 }
4350 
4351 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4352 {
4353 	struct vm_area_struct *vma = vmf->vma;
4354 	struct page *page = NULL;
4355 	int page_nid = NUMA_NO_NODE;
4356 	int last_cpupid;
4357 	int target_nid;
4358 	pte_t pte, old_pte;
4359 	bool was_writable = pte_savedwrite(vmf->orig_pte);
4360 	int flags = 0;
4361 
4362 	/*
4363 	 * The "pte" at this point cannot be used safely without
4364 	 * validation through pte_unmap_same(). It's of NUMA type but
4365 	 * the pfn may be garbage if the read is not atomic.
4366 	 */
4367 	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4368 	spin_lock(vmf->ptl);
4369 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4370 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4371 		goto out;
4372 	}
4373 
4374 	/* Get the normal PTE  */
4375 	old_pte = ptep_get(vmf->pte);
4376 	pte = pte_modify(old_pte, vma->vm_page_prot);
4377 
4378 	page = vm_normal_page(vma, vmf->address, pte);
4379 	if (!page)
4380 		goto out_map;
4381 
4382 	/* TODO: handle PTE-mapped THP */
4383 	if (PageCompound(page))
4384 		goto out_map;
4385 
4386 	/*
4387 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4388 	 * much anyway since they can be in shared cache state. This misses
4389 	 * the case where a mapping is writable but the process never writes
4390 	 * to it but pte_write gets cleared during protection updates and
4391 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4392 	 * background writeback, dirty balancing and application behaviour.
4393 	 */
4394 	if (!was_writable)
4395 		flags |= TNF_NO_GROUP;
4396 
4397 	/*
4398 	 * Flag if the page is shared between multiple address spaces. This
4399 	 * is later used when determining whether to group tasks together
4400 	 */
4401 	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4402 		flags |= TNF_SHARED;
4403 
4404 	last_cpupid = page_cpupid_last(page);
4405 	page_nid = page_to_nid(page);
4406 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4407 			&flags);
4408 	if (target_nid == NUMA_NO_NODE) {
4409 		put_page(page);
4410 		goto out_map;
4411 	}
4412 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4413 
4414 	/* Migrate to the requested node */
4415 	if (migrate_misplaced_page(page, vma, target_nid)) {
4416 		page_nid = target_nid;
4417 		flags |= TNF_MIGRATED;
4418 	} else {
4419 		flags |= TNF_MIGRATE_FAIL;
4420 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4421 		spin_lock(vmf->ptl);
4422 		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4423 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4424 			goto out;
4425 		}
4426 		goto out_map;
4427 	}
4428 
4429 out:
4430 	if (page_nid != NUMA_NO_NODE)
4431 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4432 	return 0;
4433 out_map:
4434 	/*
4435 	 * Make it present again.  Depending on how the arch implements
4436 	 * non-accessible ptes, some may allow access by kernel mode.
4437 	 */
4438 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4439 	pte = pte_modify(old_pte, vma->vm_page_prot);
4440 	pte = pte_mkyoung(pte);
4441 	if (was_writable)
4442 		pte = pte_mkwrite(pte);
4443 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4444 	update_mmu_cache(vma, vmf->address, vmf->pte);
4445 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4446 	goto out;
4447 }
4448 
4449 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4450 {
4451 	if (vma_is_anonymous(vmf->vma))
4452 		return do_huge_pmd_anonymous_page(vmf);
4453 	if (vmf->vma->vm_ops->huge_fault)
4454 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4455 	return VM_FAULT_FALLBACK;
4456 }
4457 
4458 /* `inline' is required to avoid gcc 4.1.2 build error */
4459 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4460 {
4461 	if (vma_is_anonymous(vmf->vma)) {
4462 		if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
4463 			return handle_userfault(vmf, VM_UFFD_WP);
4464 		return do_huge_pmd_wp_page(vmf);
4465 	}
4466 	if (vmf->vma->vm_ops->huge_fault) {
4467 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4468 
4469 		if (!(ret & VM_FAULT_FALLBACK))
4470 			return ret;
4471 	}
4472 
4473 	/* COW or write-notify handled on pte level: split pmd. */
4474 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4475 
4476 	return VM_FAULT_FALLBACK;
4477 }
4478 
4479 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4480 {
4481 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4482 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4483 	/* No support for anonymous transparent PUD pages yet */
4484 	if (vma_is_anonymous(vmf->vma))
4485 		goto split;
4486 	if (vmf->vma->vm_ops->huge_fault) {
4487 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4488 
4489 		if (!(ret & VM_FAULT_FALLBACK))
4490 			return ret;
4491 	}
4492 split:
4493 	/* COW or write-notify not handled on PUD level: split pud. */
4494 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4495 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4496 	return VM_FAULT_FALLBACK;
4497 }
4498 
4499 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4500 {
4501 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4502 	/* No support for anonymous transparent PUD pages yet */
4503 	if (vma_is_anonymous(vmf->vma))
4504 		return VM_FAULT_FALLBACK;
4505 	if (vmf->vma->vm_ops->huge_fault)
4506 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4507 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4508 	return VM_FAULT_FALLBACK;
4509 }
4510 
4511 /*
4512  * These routines also need to handle stuff like marking pages dirty
4513  * and/or accessed for architectures that don't do it in hardware (most
4514  * RISC architectures).  The early dirtying is also good on the i386.
4515  *
4516  * There is also a hook called "update_mmu_cache()" that architectures
4517  * with external mmu caches can use to update those (i.e. the Sparc or
4518  * PowerPC hashed page tables that act as extended TLBs).
4519  *
4520  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4521  * concurrent faults).
4522  *
4523  * The mmap_lock may have been released depending on flags and our return value.
4524  * See filemap_fault() and __folio_lock_or_retry().
4525  */
4526 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4527 {
4528 	pte_t entry;
4529 
4530 	if (unlikely(pmd_none(*vmf->pmd))) {
4531 		/*
4532 		 * Leave __pte_alloc() until later: because vm_ops->fault may
4533 		 * want to allocate huge page, and if we expose page table
4534 		 * for an instant, it will be difficult to retract from
4535 		 * concurrent faults and from rmap lookups.
4536 		 */
4537 		vmf->pte = NULL;
4538 	} else {
4539 		/*
4540 		 * If a huge pmd materialized under us just retry later.  Use
4541 		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
4542 		 * of pmd_trans_huge() to ensure the pmd didn't become
4543 		 * pmd_trans_huge under us and then back to pmd_none, as a
4544 		 * result of MADV_DONTNEED running immediately after a huge pmd
4545 		 * fault in a different thread of this mm, in turn leading to a
4546 		 * misleading pmd_trans_huge() retval. All we have to ensure is
4547 		 * that it is a regular pmd that we can walk with
4548 		 * pte_offset_map() and we can do that through an atomic read
4549 		 * in C, which is what pmd_trans_unstable() provides.
4550 		 */
4551 		if (pmd_devmap_trans_unstable(vmf->pmd))
4552 			return 0;
4553 		/*
4554 		 * A regular pmd is established and it can't morph into a huge
4555 		 * pmd from under us anymore at this point because we hold the
4556 		 * mmap_lock read mode and khugepaged takes it in write mode.
4557 		 * So now it's safe to run pte_offset_map().
4558 		 */
4559 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4560 		vmf->orig_pte = *vmf->pte;
4561 
4562 		/*
4563 		 * some architectures can have larger ptes than wordsize,
4564 		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4565 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4566 		 * accesses.  The code below just needs a consistent view
4567 		 * for the ifs and we later double-check anyway with the
4568 		 * ptl lock held. So here a barrier will do.
4569 		 */
4570 		barrier();
4571 		if (pte_none(vmf->orig_pte)) {
4572 			pte_unmap(vmf->pte);
4573 			vmf->pte = NULL;
4574 		}
4575 	}
4576 
4577 	if (!vmf->pte) {
4578 		if (vma_is_anonymous(vmf->vma))
4579 			return do_anonymous_page(vmf);
4580 		else
4581 			return do_fault(vmf);
4582 	}
4583 
4584 	if (!pte_present(vmf->orig_pte))
4585 		return do_swap_page(vmf);
4586 
4587 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4588 		return do_numa_page(vmf);
4589 
4590 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4591 	spin_lock(vmf->ptl);
4592 	entry = vmf->orig_pte;
4593 	if (unlikely(!pte_same(*vmf->pte, entry))) {
4594 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4595 		goto unlock;
4596 	}
4597 	if (vmf->flags & FAULT_FLAG_WRITE) {
4598 		if (!pte_write(entry))
4599 			return do_wp_page(vmf);
4600 		entry = pte_mkdirty(entry);
4601 	}
4602 	entry = pte_mkyoung(entry);
4603 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4604 				vmf->flags & FAULT_FLAG_WRITE)) {
4605 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4606 	} else {
4607 		/* Skip spurious TLB flush for retried page fault */
4608 		if (vmf->flags & FAULT_FLAG_TRIED)
4609 			goto unlock;
4610 		/*
4611 		 * This is needed only for protection faults but the arch code
4612 		 * is not yet telling us if this is a protection fault or not.
4613 		 * This still avoids useless tlb flushes for .text page faults
4614 		 * with threads.
4615 		 */
4616 		if (vmf->flags & FAULT_FLAG_WRITE)
4617 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4618 	}
4619 unlock:
4620 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4621 	return 0;
4622 }
4623 
4624 /*
4625  * By the time we get here, we already hold the mm semaphore
4626  *
4627  * The mmap_lock may have been released depending on flags and our
4628  * return value.  See filemap_fault() and __folio_lock_or_retry().
4629  */
4630 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4631 		unsigned long address, unsigned int flags)
4632 {
4633 	struct vm_fault vmf = {
4634 		.vma = vma,
4635 		.address = address & PAGE_MASK,
4636 		.real_address = address,
4637 		.flags = flags,
4638 		.pgoff = linear_page_index(vma, address),
4639 		.gfp_mask = __get_fault_gfp_mask(vma),
4640 	};
4641 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4642 	struct mm_struct *mm = vma->vm_mm;
4643 	pgd_t *pgd;
4644 	p4d_t *p4d;
4645 	vm_fault_t ret;
4646 
4647 	pgd = pgd_offset(mm, address);
4648 	p4d = p4d_alloc(mm, pgd, address);
4649 	if (!p4d)
4650 		return VM_FAULT_OOM;
4651 
4652 	vmf.pud = pud_alloc(mm, p4d, address);
4653 	if (!vmf.pud)
4654 		return VM_FAULT_OOM;
4655 retry_pud:
4656 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4657 		ret = create_huge_pud(&vmf);
4658 		if (!(ret & VM_FAULT_FALLBACK))
4659 			return ret;
4660 	} else {
4661 		pud_t orig_pud = *vmf.pud;
4662 
4663 		barrier();
4664 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4665 
4666 			/* NUMA case for anonymous PUDs would go here */
4667 
4668 			if (dirty && !pud_write(orig_pud)) {
4669 				ret = wp_huge_pud(&vmf, orig_pud);
4670 				if (!(ret & VM_FAULT_FALLBACK))
4671 					return ret;
4672 			} else {
4673 				huge_pud_set_accessed(&vmf, orig_pud);
4674 				return 0;
4675 			}
4676 		}
4677 	}
4678 
4679 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4680 	if (!vmf.pmd)
4681 		return VM_FAULT_OOM;
4682 
4683 	/* Huge pud page fault raced with pmd_alloc? */
4684 	if (pud_trans_unstable(vmf.pud))
4685 		goto retry_pud;
4686 
4687 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4688 		ret = create_huge_pmd(&vmf);
4689 		if (!(ret & VM_FAULT_FALLBACK))
4690 			return ret;
4691 	} else {
4692 		vmf.orig_pmd = *vmf.pmd;
4693 
4694 		barrier();
4695 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
4696 			VM_BUG_ON(thp_migration_supported() &&
4697 					  !is_pmd_migration_entry(vmf.orig_pmd));
4698 			if (is_pmd_migration_entry(vmf.orig_pmd))
4699 				pmd_migration_entry_wait(mm, vmf.pmd);
4700 			return 0;
4701 		}
4702 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
4703 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
4704 				return do_huge_pmd_numa_page(&vmf);
4705 
4706 			if (dirty && !pmd_write(vmf.orig_pmd)) {
4707 				ret = wp_huge_pmd(&vmf);
4708 				if (!(ret & VM_FAULT_FALLBACK))
4709 					return ret;
4710 			} else {
4711 				huge_pmd_set_accessed(&vmf);
4712 				return 0;
4713 			}
4714 		}
4715 	}
4716 
4717 	return handle_pte_fault(&vmf);
4718 }
4719 
4720 /**
4721  * mm_account_fault - Do page fault accounting
4722  *
4723  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
4724  *        of perf event counters, but we'll still do the per-task accounting to
4725  *        the task that triggered this page fault.
4726  * @address: the faulted address.
4727  * @flags: the fault flags.
4728  * @ret: the fault retcode.
4729  *
4730  * This will take care of most of the page fault accounting.  Meanwhile, it
4731  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4732  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4733  * still be in per-arch page fault handlers at the entry of page fault.
4734  */
4735 static inline void mm_account_fault(struct pt_regs *regs,
4736 				    unsigned long address, unsigned int flags,
4737 				    vm_fault_t ret)
4738 {
4739 	bool major;
4740 
4741 	/*
4742 	 * We don't do accounting for some specific faults:
4743 	 *
4744 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
4745 	 *   includes arch_vma_access_permitted() failing before reaching here.
4746 	 *   So this is not a "this many hardware page faults" counter.  We
4747 	 *   should use the hw profiling for that.
4748 	 *
4749 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
4750 	 *   once they're completed.
4751 	 */
4752 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4753 		return;
4754 
4755 	/*
4756 	 * We define the fault as a major fault when the final successful fault
4757 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4758 	 * handle it immediately previously).
4759 	 */
4760 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4761 
4762 	if (major)
4763 		current->maj_flt++;
4764 	else
4765 		current->min_flt++;
4766 
4767 	/*
4768 	 * If the fault is done for GUP, regs will be NULL.  We only do the
4769 	 * per-thread fault counter accounting for the task that triggered the
4770 	 * fault, and we skip the perf event updates.
4771 	 */
4772 	if (!regs)
4773 		return;
4774 
4775 	if (major)
4776 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4777 	else
4778 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4779 }
4780 
4781 /*
4782  * By the time we get here, we already hold the mm semaphore
4783  *
4784  * The mmap_lock may have been released depending on flags and our
4785  * return value.  See filemap_fault() and __folio_lock_or_retry().
4786  */
4787 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4788 			   unsigned int flags, struct pt_regs *regs)
4789 {
4790 	vm_fault_t ret;
4791 
4792 	__set_current_state(TASK_RUNNING);
4793 
4794 	count_vm_event(PGFAULT);
4795 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
4796 
4797 	/* do counter updates before entering the really critical section. */
4798 	check_sync_rss_stat(current);
4799 
4800 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4801 					    flags & FAULT_FLAG_INSTRUCTION,
4802 					    flags & FAULT_FLAG_REMOTE))
4803 		return VM_FAULT_SIGSEGV;
4804 
4805 	/*
4806 	 * Enable the memcg OOM handling for faults triggered in user
4807 	 * space.  Kernel faults are handled more gracefully.
4808 	 */
4809 	if (flags & FAULT_FLAG_USER)
4810 		mem_cgroup_enter_user_fault();
4811 
4812 	if (unlikely(is_vm_hugetlb_page(vma)))
4813 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4814 	else
4815 		ret = __handle_mm_fault(vma, address, flags);
4816 
4817 	if (flags & FAULT_FLAG_USER) {
4818 		mem_cgroup_exit_user_fault();
4819 		/*
4820 		 * The task may have entered a memcg OOM situation but
4821 		 * if the allocation error was handled gracefully (no
4822 		 * VM_FAULT_OOM), there is no need to kill anything.
4823 		 * Just clean up the OOM state peacefully.
4824 		 */
4825 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4826 			mem_cgroup_oom_synchronize(false);
4827 	}
4828 
4829 	mm_account_fault(regs, address, flags, ret);
4830 
4831 	return ret;
4832 }
4833 EXPORT_SYMBOL_GPL(handle_mm_fault);
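/*
 * Sketch of the usual caller, an architecture's page-fault handler
 * (simplified from the common arch/<arch>/mm/fault.c pattern; error
 * handling, vma checks and signal handling are omitted, so treat this as
 * illustration only):
 *
 *	retry:
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, address);
 *	...
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		// mmap_lock was already released by the fault path
 *		flags |= FAULT_FLAG_TRIED;
 *		goto retry;
 *	}
 *	mmap_read_unlock(mm);
 */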
4834 
4835 #ifndef __PAGETABLE_P4D_FOLDED
4836 /*
4837  * Allocate p4d page table.
4838  * We've already handled the fast-path in-line.
4839  */
4840 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4841 {
4842 	p4d_t *new = p4d_alloc_one(mm, address);
4843 	if (!new)
4844 		return -ENOMEM;
4845 
4846 	spin_lock(&mm->page_table_lock);
4847 	if (pgd_present(*pgd)) {	/* Another has populated it */
4848 		p4d_free(mm, new);
4849 	} else {
4850 		smp_wmb(); /* See comment in pmd_install() */
4851 		pgd_populate(mm, pgd, new);
4852 	}
4853 	spin_unlock(&mm->page_table_lock);
4854 	return 0;
4855 }
4856 #endif /* __PAGETABLE_P4D_FOLDED */
4857 
4858 #ifndef __PAGETABLE_PUD_FOLDED
4859 /*
4860  * Allocate page upper directory.
4861  * We've already handled the fast-path in-line.
4862  */
4863 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4864 {
4865 	pud_t *new = pud_alloc_one(mm, address);
4866 	if (!new)
4867 		return -ENOMEM;
4868 
4869 	spin_lock(&mm->page_table_lock);
4870 	if (!p4d_present(*p4d)) {
4871 		mm_inc_nr_puds(mm);
4872 		smp_wmb(); /* See comment in pmd_install() */
4873 		p4d_populate(mm, p4d, new);
4874 	} else	/* Another has populated it */
4875 		pud_free(mm, new);
4876 	spin_unlock(&mm->page_table_lock);
4877 	return 0;
4878 }
4879 #endif /* __PAGETABLE_PUD_FOLDED */
4880 
4881 #ifndef __PAGETABLE_PMD_FOLDED
4882 /*
4883  * Allocate page middle directory.
4884  * We've already handled the fast-path in-line.
4885  */
4886 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4887 {
4888 	spinlock_t *ptl;
4889 	pmd_t *new = pmd_alloc_one(mm, address);
4890 	if (!new)
4891 		return -ENOMEM;
4892 
4893 	ptl = pud_lock(mm, pud);
4894 	if (!pud_present(*pud)) {
4895 		mm_inc_nr_pmds(mm);
4896 		smp_wmb(); /* See comment in pmd_install() */
4897 		pud_populate(mm, pud, new);
4898 	} else {	/* Another has populated it */
4899 		pmd_free(mm, new);
4900 	}
4901 	spin_unlock(ptl);
4902 	return 0;
4903 }
4904 #endif /* __PAGETABLE_PMD_FOLDED */
4905 
4906 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
4907 			  struct mmu_notifier_range *range, pte_t **ptepp,
4908 			  pmd_t **pmdpp, spinlock_t **ptlp)
4909 {
4910 	pgd_t *pgd;
4911 	p4d_t *p4d;
4912 	pud_t *pud;
4913 	pmd_t *pmd;
4914 	pte_t *ptep;
4915 
4916 	pgd = pgd_offset(mm, address);
4917 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4918 		goto out;
4919 
4920 	p4d = p4d_offset(pgd, address);
4921 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4922 		goto out;
4923 
4924 	pud = pud_offset(p4d, address);
4925 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4926 		goto out;
4927 
4928 	pmd = pmd_offset(pud, address);
4929 	VM_BUG_ON(pmd_trans_huge(*pmd));
4930 
4931 	if (pmd_huge(*pmd)) {
4932 		if (!pmdpp)
4933 			goto out;
4934 
4935 		if (range) {
4936 			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4937 						NULL, mm, address & PMD_MASK,
4938 						(address & PMD_MASK) + PMD_SIZE);
4939 			mmu_notifier_invalidate_range_start(range);
4940 		}
4941 		*ptlp = pmd_lock(mm, pmd);
4942 		if (pmd_huge(*pmd)) {
4943 			*pmdpp = pmd;
4944 			return 0;
4945 		}
4946 		spin_unlock(*ptlp);
4947 		if (range)
4948 			mmu_notifier_invalidate_range_end(range);
4949 	}
4950 
4951 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4952 		goto out;
4953 
4954 	if (range) {
4955 		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4956 					address & PAGE_MASK,
4957 					(address & PAGE_MASK) + PAGE_SIZE);
4958 		mmu_notifier_invalidate_range_start(range);
4959 	}
4960 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4961 	if (!pte_present(*ptep))
4962 		goto unlock;
4963 	*ptepp = ptep;
4964 	return 0;
4965 unlock:
4966 	pte_unmap_unlock(ptep, *ptlp);
4967 	if (range)
4968 		mmu_notifier_invalidate_range_end(range);
4969 out:
4970 	return -EINVAL;
4971 }
4972 
4973 /**
4974  * follow_pte - look up PTE at a user virtual address
4975  * @mm: the mm_struct of the target address space
4976  * @address: user virtual address
4977  * @ptepp: location to store found PTE
4978  * @ptlp: location to store the lock for the PTE
4979  *
4980  * On a successful return, the pointer to the PTE is stored in @ptepp;
4981  * the corresponding lock is taken and its location is stored in @ptlp.
4982  * The contents of the PTE are only stable until @ptlp is released;
4983  * any further use, if any, must be protected against invalidation
4984  * with MMU notifiers.
4985  *
4986  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
4987  * should be taken for read.
4988  *
4989  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
4990  * it is not a good general-purpose API.
4991  *
4992  * Return: zero on success, -ve otherwise.
4993  */
4994 int follow_pte(struct mm_struct *mm, unsigned long address,
4995 	       pte_t **ptepp, spinlock_t **ptlp)
4996 {
4997 	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
4998 }
4999 EXPORT_SYMBOL_GPL(follow_pte);
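/*
 * Usage sketch for the contract described above (follow_pfn() just below is
 * the in-tree example of the same pattern): the pte is only stable while the
 * returned lock is held, so consume it before unlocking.
 *
 *	pte_t *ptep;
 *	spinlock_t *ptl;
 *
 *	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
 *		return -EINVAL;
 *	pfn = pte_pfn(*ptep);		// read *ptep while ptl is held
 *	pte_unmap_unlock(ptep, ptl);
 */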
5000 
5001 /**
5002  * follow_pfn - look up PFN at a user virtual address
5003  * @vma: memory mapping
5004  * @address: user virtual address
5005  * @pfn: location to store found PFN
5006  *
5007  * Only IO mappings and raw PFN mappings are allowed.
5008  *
5009  * This function does not allow the caller to read the permissions
5010  * of the PTE.  Do not use it.
5011  *
5012  * Return: zero and the pfn at @pfn on success, -ve otherwise.
5013  */
5014 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5015 	unsigned long *pfn)
5016 {
5017 	int ret = -EINVAL;
5018 	spinlock_t *ptl;
5019 	pte_t *ptep;
5020 
5021 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5022 		return ret;
5023 
5024 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5025 	if (ret)
5026 		return ret;
5027 	*pfn = pte_pfn(*ptep);
5028 	pte_unmap_unlock(ptep, ptl);
5029 	return 0;
5030 }
5031 EXPORT_SYMBOL(follow_pfn);
5032 
5033 #ifdef CONFIG_HAVE_IOREMAP_PROT
5034 int follow_phys(struct vm_area_struct *vma,
5035 		unsigned long address, unsigned int flags,
5036 		unsigned long *prot, resource_size_t *phys)
5037 {
5038 	int ret = -EINVAL;
5039 	pte_t *ptep, pte;
5040 	spinlock_t *ptl;
5041 
5042 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5043 		goto out;
5044 
5045 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5046 		goto out;
5047 	pte = *ptep;
5048 
5049 	if ((flags & FOLL_WRITE) && !pte_write(pte))
5050 		goto unlock;
5051 
5052 	*prot = pgprot_val(pte_pgprot(pte));
5053 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5054 
5055 	ret = 0;
5056 unlock:
5057 	pte_unmap_unlock(ptep, ptl);
5058 out:
5059 	return ret;
5060 }
5061 
5062 /**
5063  * generic_access_phys - generic implementation for iomem mmap access
5064  * @vma: the vma to access
5065  * @addr: userspace address, not relative offset within @vma
5066  * @buf: buffer to read/write
5067  * @len: length of transfer
5068  * @write: set to FOLL_WRITE when writing, otherwise reading
5069  *
5070  * This is a generic implementation for &vm_operations_struct.access for an
5071  * iomem mapping. This callback is used by access_process_vm() when the @vma is
5072  * not page based.
5073  */
5074 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5075 			void *buf, int len, int write)
5076 {
5077 	resource_size_t phys_addr;
5078 	unsigned long prot = 0;
5079 	void __iomem *maddr;
5080 	pte_t *ptep, pte;
5081 	spinlock_t *ptl;
5082 	int offset = offset_in_page(addr);
5083 	int ret = -EINVAL;
5084 
5085 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5086 		return -EINVAL;
5087 
5088 retry:
5089 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5090 		return -EINVAL;
5091 	pte = *ptep;
5092 	pte_unmap_unlock(ptep, ptl);
5093 
5094 	prot = pgprot_val(pte_pgprot(pte));
5095 	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5096 
5097 	if ((write & FOLL_WRITE) && !pte_write(pte))
5098 		return -EINVAL;
5099 
5100 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5101 	if (!maddr)
5102 		return -ENOMEM;
5103 
5104 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5105 		goto out_unmap;
5106 
5107 	if (!pte_same(pte, *ptep)) {
5108 		pte_unmap_unlock(ptep, ptl);
5109 		iounmap(maddr);
5110 
5111 		goto retry;
5112 	}
5113 
5114 	if (write)
5115 		memcpy_toio(maddr + offset, buf, len);
5116 	else
5117 		memcpy_fromio(buf, maddr + offset, len);
5118 	ret = len;
5119 	pte_unmap_unlock(ptep, ptl);
5120 out_unmap:
5121 	iounmap(maddr);
5122 
5123 	return ret;
5124 }
5125 EXPORT_SYMBOL_GPL(generic_access_phys);
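/*
 * Typical wiring, as a sketch: a driver that mmaps MMIO with VM_IO | VM_PFNMAP
 * can point its vm_operations_struct at this helper so that ptrace and
 * /proc/<pid>/mem accesses work ("mydrv" is a made-up name):
 *
 *	static const struct vm_operations_struct mydrv_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 *
 *	// in the driver's ->mmap():
 *	vma->vm_ops = &mydrv_vm_ops;
 */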
5126 #endif
5127 
5128 /*
5129  * Access another process' address space as given in mm.
5130  */
5131 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5132 		       int len, unsigned int gup_flags)
5133 {
5134 	struct vm_area_struct *vma;
5135 	void *old_buf = buf;
5136 	int write = gup_flags & FOLL_WRITE;
5137 
5138 	if (mmap_read_lock_killable(mm))
5139 		return 0;
5140 
5141 	/* ignore errors, just check how much was successfully transferred */
5142 	while (len) {
5143 		int bytes, ret, offset;
5144 		void *maddr;
5145 		struct page *page = NULL;
5146 
5147 		ret = get_user_pages_remote(mm, addr, 1,
5148 				gup_flags, &page, &vma, NULL);
5149 		if (ret <= 0) {
5150 #ifndef CONFIG_HAVE_IOREMAP_PROT
5151 			break;
5152 #else
5153 			/*
5154 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5155 			 * we can access using slightly different code.
5156 			 */
5157 			vma = vma_lookup(mm, addr);
5158 			if (!vma)
5159 				break;
5160 			if (vma->vm_ops && vma->vm_ops->access)
5161 				ret = vma->vm_ops->access(vma, addr, buf,
5162 							  len, write);
5163 			if (ret <= 0)
5164 				break;
5165 			bytes = ret;
5166 #endif
5167 		} else {
5168 			bytes = len;
5169 			offset = addr & (PAGE_SIZE-1);
5170 			if (bytes > PAGE_SIZE-offset)
5171 				bytes = PAGE_SIZE-offset;
5172 
5173 			maddr = kmap(page);
5174 			if (write) {
5175 				copy_to_user_page(vma, page, addr,
5176 						  maddr + offset, buf, bytes);
5177 				set_page_dirty_lock(page);
5178 			} else {
5179 				copy_from_user_page(vma, page, addr,
5180 						    buf, maddr + offset, bytes);
5181 			}
5182 			kunmap(page);
5183 			put_page(page);
5184 		}
5185 		len -= bytes;
5186 		buf += bytes;
5187 		addr += bytes;
5188 	}
5189 	mmap_read_unlock(mm);
5190 
5191 	return buf - old_buf;
5192 }
5193 
5194 /**
5195  * access_remote_vm - access another process' address space
5196  * @mm:		the mm_struct of the target address space
5197  * @addr:	start address to access
5198  * @buf:	source or destination buffer
5199  * @len:	number of bytes to transfer
5200  * @gup_flags:	flags modifying lookup behaviour
5201  *
5202  * The caller must hold a reference on @mm.
5203  *
5204  * Return: number of bytes copied from source to destination.
5205  */
5206 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5207 		void *buf, int len, unsigned int gup_flags)
5208 {
5209 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
5210 }
5211 
5212 /*
5213  * Access another process' address space.
5214  * Source/target buffer must be in kernel space.
5215  * Do not walk the page tables directly; use get_user_pages().
5216  */
5217 int access_process_vm(struct task_struct *tsk, unsigned long addr,
5218 		void *buf, int len, unsigned int gup_flags)
5219 {
5220 	struct mm_struct *mm;
5221 	int ret;
5222 
5223 	mm = get_task_mm(tsk);
5224 	if (!mm)
5225 		return 0;
5226 
5227 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5228 
5229 	mmput(mm);
5230 
5231 	return ret;
5232 }
5233 EXPORT_SYMBOL_GPL(access_process_vm);
5234 
5235 /*
5236  * Print the name of a VMA.
5237  */
5238 void print_vma_addr(char *prefix, unsigned long ip)
5239 {
5240 	struct mm_struct *mm = current->mm;
5241 	struct vm_area_struct *vma;
5242 
5243 	/*
5244 	 * We might be running from an atomic context, so we cannot sleep.
5245 	 */
5246 	if (!mmap_read_trylock(mm))
5247 		return;
5248 
5249 	vma = find_vma(mm, ip);
5250 	if (vma && vma->vm_file) {
5251 		struct file *f = vma->vm_file;
5252 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5253 		if (buf) {
5254 			char *p;
5255 
5256 			p = file_path(f, buf, PAGE_SIZE);
5257 			if (IS_ERR(p))
5258 				p = "?";
5259 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5260 					vma->vm_start,
5261 					vma->vm_end - vma->vm_start);
5262 			free_page((unsigned long)buf);
5263 		}
5264 	}
5265 	mmap_read_unlock(mm);
5266 }
5267 
5268 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5269 void __might_fault(const char *file, int line)
5270 {
5271 	/*
5272 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5273 	 * holding the mmap_lock.  This is safe because kernel memory doesn't
5274 	 * get paged out, so we'll never actually fault, and the annotations
5275 	 * below would generate false positives.
5276 	 */
5277 	if (uaccess_kernel())
5278 		return;
5279 	if (pagefault_disabled())
5280 		return;
5281 	__might_sleep(file, line);
5282 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5283 	if (current->mm)
5284 		might_lock_read(&current->mm->mmap_lock);
5285 #endif
5286 }
5287 EXPORT_SYMBOL(__might_fault);
5288 #endif
5289 
5290 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5291 /*
5292  * Process all subpages of the specified huge page with the specified
5293  * operation.  The target subpage will be processed last to keep its
5294  * cache lines hot.
5295  */
5296 static inline void process_huge_page(
5297 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5298 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5299 	void *arg)
5300 {
5301 	int i, n, base, l;
5302 	unsigned long addr = addr_hint &
5303 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5304 
5305 	/* Process target subpage last to keep its cache lines hot */
5306 	might_sleep();
5307 	n = (addr_hint - addr) / PAGE_SIZE;
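	/*
	 * base/l describe a window of 2*l subpages containing the target
	 * subpage n (= base + l).  Subpages outside this window are
	 * processed first; the window itself is then walked inwards from
	 * both ends so that n is touched last.
	 */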
5308 	if (2 * n <= pages_per_huge_page) {
5309 		/* If the target subpage is in the first half of the huge page */
5310 		base = 0;
5311 		l = n;
5312 		/* Process subpages at the end of the huge page */
5313 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5314 			cond_resched();
5315 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5316 		}
5317 	} else {
5318 		/* If the target subpage is in the second half of the huge page */
5319 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5320 		l = pages_per_huge_page - n;
5321 		/* Process subpages at the begin of huge page */
5322 		/* Process subpages at the beginning of the huge page */
5323 			cond_resched();
5324 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5325 		}
5326 	}
5327 	/*
5328 	 * Process remaining subpages in left-right-left-right pattern
5329 	 * towards the target subpage
5330 	 */
5331 	for (i = 0; i < l; i++) {
5332 		int left_idx = base + i;
5333 		int right_idx = base + 2 * l - 1 - i;
5334 
5335 		cond_resched();
5336 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5337 		cond_resched();
5338 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5339 	}
5340 }
5341 
5342 static void clear_gigantic_page(struct page *page,
5343 				unsigned long addr,
5344 				unsigned int pages_per_huge_page)
5345 {
5346 	int i;
5347 	struct page *p = page;
5348 
5349 	might_sleep();
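	/*
	 * A gigantic page may span a discontiguous mem_map, so advance with
	 * mem_map_next() instead of plain "page + i" arithmetic.
	 */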
5350 	for (i = 0; i < pages_per_huge_page;
5351 	     i++, p = mem_map_next(p, page, i)) {
5352 		cond_resched();
5353 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5354 	}
5355 }
5356 
5357 static void clear_subpage(unsigned long addr, int idx, void *arg)
5358 {
5359 	struct page *page = arg;
5360 
5361 	clear_user_highpage(page + idx, addr);
5362 }
5363 
5364 void clear_huge_page(struct page *page,
5365 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5366 {
5367 	unsigned long addr = addr_hint &
5368 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5369 
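	/*
	 * process_huge_page() indexes subpages with plain pointer
	 * arithmetic, which is only valid while the struct pages are
	 * contiguous; gigantic pages may not satisfy that, so use the
	 * mem_map_next()-based loop for them.
	 */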
5370 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5371 		clear_gigantic_page(page, addr, pages_per_huge_page);
5372 		return;
5373 	}
5374 
5375 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5376 }
5377 
5378 static void copy_user_gigantic_page(struct page *dst, struct page *src,
5379 				    unsigned long addr,
5380 				    struct vm_area_struct *vma,
5381 				    unsigned int pages_per_huge_page)
5382 {
5383 	int i;
5384 	struct page *dst_base = dst;
5385 	struct page *src_base = src;
5386 
5387 	for (i = 0; i < pages_per_huge_page; ) {
5388 		cond_resched();
5389 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5390 
5391 		i++;
5392 		dst = mem_map_next(dst, dst_base, i);
5393 		src = mem_map_next(src, src_base, i);
5394 	}
5395 }
5396 
5397 struct copy_subpage_arg {
5398 	struct page *dst;
5399 	struct page *src;
5400 	struct vm_area_struct *vma;
5401 };
5402 
5403 static void copy_subpage(unsigned long addr, int idx, void *arg)
5404 {
5405 	struct copy_subpage_arg *copy_arg = arg;
5406 
5407 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5408 			   addr, copy_arg->vma);
5409 }
5410 
5411 void copy_user_huge_page(struct page *dst, struct page *src,
5412 			 unsigned long addr_hint, struct vm_area_struct *vma,
5413 			 unsigned int pages_per_huge_page)
5414 {
5415 	unsigned long addr = addr_hint &
5416 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5417 	struct copy_subpage_arg arg = {
5418 		.dst = dst,
5419 		.src = src,
5420 		.vma = vma,
5421 	};
5422 
5423 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5424 		copy_user_gigantic_page(dst, src, addr, vma,
5425 					pages_per_huge_page);
5426 		return;
5427 	}
5428 
5429 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5430 }
5431 
5432 long copy_huge_page_from_user(struct page *dst_page,
5433 				const void __user *usr_src,
5434 				unsigned int pages_per_huge_page,
5435 				bool allow_pagefault)
5436 {
5437 	void *page_kaddr;
5438 	unsigned long i, rc = 0;
5439 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5440 	struct page *subpage = dst_page;
5441 
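	/*
	 * ret_val ends up as the number of bytes that could NOT be copied
	 * (0 on success).  With allow_pagefault the copy may sleep to fault
	 * in the source; otherwise an atomic kmap is used and a short copy
	 * ends the loop so the caller can retry.
	 */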
5442 	for (i = 0; i < pages_per_huge_page;
5443 	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
5444 		if (allow_pagefault)
5445 			page_kaddr = kmap(subpage);
5446 		else
5447 			page_kaddr = kmap_atomic(subpage);
5448 		rc = copy_from_user(page_kaddr,
5449 				usr_src + i * PAGE_SIZE, PAGE_SIZE);
5450 		if (allow_pagefault)
5451 			kunmap(subpage);
5452 		else
5453 			kunmap_atomic(page_kaddr);
5454 
5455 		ret_val -= (PAGE_SIZE - rc);
5456 		if (rc)
5457 			break;
5458 
5459 		flush_dcache_page(subpage);
5460 
5461 		cond_resched();
5462 	}
5463 	return ret_val;
5464 }
5465 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5466 
5467 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5468 
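/*
 * With split PTE locks the lock normally lives inside struct page.  When
 * spinlock_t is too large to embed there (e.g. with lockdep),
 * ALLOC_SPLIT_PTLOCKS makes page->ptl a pointer to a lock allocated from
 * this dedicated cache.
 */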
5469 static struct kmem_cache *page_ptl_cachep;
5470 
5471 void __init ptlock_cache_init(void)
5472 {
5473 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5474 			SLAB_PANIC, NULL);
5475 }
5476 
5477 bool ptlock_alloc(struct page *page)
5478 {
5479 	spinlock_t *ptl;
5480 
5481 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5482 	if (!ptl)
5483 		return false;
5484 	page->ptl = ptl;
5485 	return true;
5486 }
5487 
5488 void ptlock_free(struct page *page)
5489 {
5490 	kmem_cache_free(page_ptl_cachep, page->ptl);
5491 }
5492 #endif
5493